diff mbox series

[net-next,3/8] virtio_net: introduce virtnet_xdp_handler() to separate the logic of run xdp

Message ID 20230328120412.110114-4-xuanzhuo@linux.alibaba.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series virtio_net: refactor xdp codes | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 18 this patch: 18
netdev/cc_maintainers success CCed 13 of 13 maintainers
netdev/build_clang success Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 18 this patch: 18
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 203 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo March 28, 2023, 12:04 p.m. UTC
At present, we have two similar pieces of logic to perform the XDP prog.

Therefore, this PATCH separates the code of executing XDP, which is
conducive to later maintenance.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
 1 file changed, 75 insertions(+), 67 deletions(-)

Comments

Jason Wang April 3, 2023, 2:43 a.m. UTC | #1
On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> At present, we have two similar logic to perform the XDP prog.
>
> Therefore, this PATCH separates the code of executing XDP, which is
> conducive to later maintenance.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
>  1 file changed, 75 insertions(+), 67 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index bb426958cdd4..72b9d6ee4024 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
>         char padding[12];
>  };
>
> +enum {
> +       /* xdp pass */
> +       VIRTNET_XDP_RES_PASS,
> +       /* drop packet. the caller needs to release the page. */
> +       VIRTNET_XDP_RES_DROP,
> +       /* packet is consumed by xdp. the caller needs to do nothing. */
> +       VIRTNET_XDP_RES_CONSUMED,
> +};

I'd prefer this to be done on top unless it is a must. But I don't see
any advantage of introducing this, it's partial mapping of XDP action
and it needs to be extended when XDP action is extended. (And we've
already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)

> +
>  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
>  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>
> @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>         return ret;
>  }
>
> +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> +                              struct net_device *dev,
> +                              unsigned int *xdp_xmit,
> +                              struct virtnet_rq_stats *stats)
> +{
> +       struct xdp_frame *xdpf;
> +       int err;
> +       u32 act;
> +
> +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> +       stats->xdp_packets++;
> +
> +       switch (act) {
> +       case XDP_PASS:
> +               return VIRTNET_XDP_RES_PASS;
> +
> +       case XDP_TX:
> +               stats->xdp_tx++;
> +               xdpf = xdp_convert_buff_to_frame(xdp);
> +               if (unlikely(!xdpf))
> +                       return VIRTNET_XDP_RES_DROP;
> +
> +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> +               if (unlikely(!err)) {
> +                       xdp_return_frame_rx_napi(xdpf);
> +               } else if (unlikely(err < 0)) {
> +                       trace_xdp_exception(dev, xdp_prog, act);
> +                       return VIRTNET_XDP_RES_DROP;
> +               }
> +
> +               *xdp_xmit |= VIRTIO_XDP_TX;
> +               return VIRTNET_XDP_RES_CONSUMED;
> +
> +       case XDP_REDIRECT:
> +               stats->xdp_redirects++;
> +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> +               if (err)
> +                       return VIRTNET_XDP_RES_DROP;
> +
> +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> +               return VIRTNET_XDP_RES_CONSUMED;
> +
> +       default:
> +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> +               fallthrough;
> +       case XDP_ABORTED:
> +               trace_xdp_exception(dev, xdp_prog, act);
> +               fallthrough;
> +       case XDP_DROP:
> +               return VIRTNET_XDP_RES_DROP;
> +       }
> +}
> +
>  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
>  {
>         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
>         struct page *page = virt_to_head_page(buf);
>         unsigned int delta = 0;
>         struct page *xdp_page;
> -       int err;
>         unsigned int metasize = 0;
>
>         len -= vi->hdr_len;
> @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
>         xdp_prog = rcu_dereference(rq->xdp_prog);
>         if (xdp_prog) {
>                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> -               struct xdp_frame *xdpf;
>                 struct xdp_buff xdp;
>                 void *orig_data;
>                 u32 act;
> @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
>                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
>                                  xdp_headroom, len, true);
>                 orig_data = xdp.data;
> -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> -               stats->xdp_packets++;
> +
> +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
>
>                 switch (act) {
> -               case XDP_PASS:
> +               case VIRTNET_XDP_RES_PASS:
>                         /* Recalculate length in case bpf program changed it */
>                         delta = orig_data - xdp.data;
>                         len = xdp.data_end - xdp.data;
>                         metasize = xdp.data - xdp.data_meta;
>                         break;
> -               case XDP_TX:
> -                       stats->xdp_tx++;
> -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> -                       if (unlikely(!xdpf))
> -                               goto err_xdp;
> -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> -                       if (unlikely(!err)) {
> -                               xdp_return_frame_rx_napi(xdpf);
> -                       } else if (unlikely(err < 0)) {
> -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> -                               goto err_xdp;
> -                       }
> -                       *xdp_xmit |= VIRTIO_XDP_TX;
> -                       rcu_read_unlock();
> -                       goto xdp_xmit;
> -               case XDP_REDIRECT:
> -                       stats->xdp_redirects++;
> -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> -                       if (err)
> -                               goto err_xdp;
> -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> +
> +               case VIRTNET_XDP_RES_CONSUMED:
>                         rcu_read_unlock();
>                         goto xdp_xmit;
> -               default:
> -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> -                       fallthrough;
> -               case XDP_ABORTED:
> -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> -                       goto err_xdp;
> -               case XDP_DROP:
> +
> +               case VIRTNET_XDP_RES_DROP:
>                         goto err_xdp;
>                 }
>         }
> @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>         if (xdp_prog) {
>                 unsigned int xdp_frags_truesz = 0;
>                 struct skb_shared_info *shinfo;
> -               struct xdp_frame *xdpf;
>                 struct page *xdp_page;
>                 struct xdp_buff xdp;
>                 void *data;
> @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>                 if (unlikely(err))
>                         goto err_xdp_frags;
>
> -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> -               stats->xdp_packets++;
> +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
>
>                 switch (act) {
> -               case XDP_PASS:
> +               case VIRTNET_XDP_RES_PASS:
>                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
>                         if (unlikely(!head_skb))
>                                 goto err_xdp_frags;
>
>                         rcu_read_unlock();
>                         return head_skb;
> -               case XDP_TX:
> -                       stats->xdp_tx++;
> -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> -                       if (unlikely(!xdpf)) {
> -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");

Nit: This debug is lost after the conversion.

Thanks

> -                               goto err_xdp_frags;
> -                       }
> -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> -                       if (unlikely(!err)) {
> -                               xdp_return_frame_rx_napi(xdpf);
> -                       } else if (unlikely(err < 0)) {
> -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> -                               goto err_xdp_frags;
> -                       }
> -                       *xdp_xmit |= VIRTIO_XDP_TX;
> -                       rcu_read_unlock();
> -                       goto xdp_xmit;
> -               case XDP_REDIRECT:
> -                       stats->xdp_redirects++;
> -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> -                       if (err)
> -                               goto err_xdp_frags;
> -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> +
> +               case VIRTNET_XDP_RES_CONSUMED:
>                         rcu_read_unlock();
>                         goto xdp_xmit;
> -               default:
> -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> -                       fallthrough;
> -               case XDP_ABORTED:
> -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> -                       fallthrough;
> -               case XDP_DROP:
> +
> +               case VIRTNET_XDP_RES_DROP:
>                         goto err_xdp_frags;
>                 }
>  err_xdp_frags:
> --
> 2.32.0.3.g01195cf9f
>
Xuan Zhuo April 3, 2023, 4:12 a.m. UTC | #2
On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > At present, we have two similar logic to perform the XDP prog.
> >
> > Therefore, this PATCH separates the code of executing XDP, which is
> > conducive to later maintenance.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> >  1 file changed, 75 insertions(+), 67 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index bb426958cdd4..72b9d6ee4024 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> >         char padding[12];
> >  };
> >
> > +enum {
> > +       /* xdp pass */
> > +       VIRTNET_XDP_RES_PASS,
> > +       /* drop packet. the caller needs to release the page. */
> > +       VIRTNET_XDP_RES_DROP,
> > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > +       VIRTNET_XDP_RES_CONSUMED,
> > +};
>
> I'd prefer this to be done on top unless it is a must. But I don't see
> any advantage of introducing this, it's partial mapping of XDP action
> and it needs to be extended when XDP action is extended. (And we've
> already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)

No, these are the three states of buffer after XDP processing.

* PASS: goto make skb
* DROP: we should release buffer
* CONSUMED: xdp prog used the buffer, we do nothing

The latter two are not particularly related to XDP ACTION. And it does not need
to be extended when the XDP action set is extended. At least I have not thought
of such a situation.


>
> > +
> >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >
> > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >         return ret;
> >  }
> >
> > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > +                              struct net_device *dev,
> > +                              unsigned int *xdp_xmit,
> > +                              struct virtnet_rq_stats *stats)
> > +{
> > +       struct xdp_frame *xdpf;
> > +       int err;
> > +       u32 act;
> > +
> > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > +       stats->xdp_packets++;
> > +
> > +       switch (act) {
> > +       case XDP_PASS:
> > +               return VIRTNET_XDP_RES_PASS;
> > +
> > +       case XDP_TX:
> > +               stats->xdp_tx++;
> > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > +               if (unlikely(!xdpf))
> > +                       return VIRTNET_XDP_RES_DROP;
> > +
> > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > +               if (unlikely(!err)) {
> > +                       xdp_return_frame_rx_napi(xdpf);
> > +               } else if (unlikely(err < 0)) {
> > +                       trace_xdp_exception(dev, xdp_prog, act);
> > +                       return VIRTNET_XDP_RES_DROP;
> > +               }
> > +
> > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > +               return VIRTNET_XDP_RES_CONSUMED;
> > +
> > +       case XDP_REDIRECT:
> > +               stats->xdp_redirects++;
> > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > +               if (err)
> > +                       return VIRTNET_XDP_RES_DROP;
> > +
> > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > +               return VIRTNET_XDP_RES_CONSUMED;
> > +
> > +       default:
> > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > +               fallthrough;
> > +       case XDP_ABORTED:
> > +               trace_xdp_exception(dev, xdp_prog, act);
> > +               fallthrough;
> > +       case XDP_DROP:
> > +               return VIRTNET_XDP_RES_DROP;
> > +       }
> > +}
> > +
> >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> >  {
> >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> >         struct page *page = virt_to_head_page(buf);
> >         unsigned int delta = 0;
> >         struct page *xdp_page;
> > -       int err;
> >         unsigned int metasize = 0;
> >
> >         len -= vi->hdr_len;
> > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> >         xdp_prog = rcu_dereference(rq->xdp_prog);
> >         if (xdp_prog) {
> >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > -               struct xdp_frame *xdpf;
> >                 struct xdp_buff xdp;
> >                 void *orig_data;
> >                 u32 act;
> > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> >                                  xdp_headroom, len, true);
> >                 orig_data = xdp.data;
> > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > -               stats->xdp_packets++;
> > +
> > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> >
> >                 switch (act) {
> > -               case XDP_PASS:
> > +               case VIRTNET_XDP_RES_PASS:
> >                         /* Recalculate length in case bpf program changed it */
> >                         delta = orig_data - xdp.data;
> >                         len = xdp.data_end - xdp.data;
> >                         metasize = xdp.data - xdp.data_meta;
> >                         break;
> > -               case XDP_TX:
> > -                       stats->xdp_tx++;
> > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > -                       if (unlikely(!xdpf))
> > -                               goto err_xdp;
> > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > -                       if (unlikely(!err)) {
> > -                               xdp_return_frame_rx_napi(xdpf);
> > -                       } else if (unlikely(err < 0)) {
> > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                               goto err_xdp;
> > -                       }
> > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > -                       rcu_read_unlock();
> > -                       goto xdp_xmit;
> > -               case XDP_REDIRECT:
> > -                       stats->xdp_redirects++;
> > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > -                       if (err)
> > -                               goto err_xdp;
> > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > +
> > +               case VIRTNET_XDP_RES_CONSUMED:
> >                         rcu_read_unlock();
> >                         goto xdp_xmit;
> > -               default:
> > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > -                       fallthrough;
> > -               case XDP_ABORTED:
> > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                       goto err_xdp;
> > -               case XDP_DROP:
> > +
> > +               case VIRTNET_XDP_RES_DROP:
> >                         goto err_xdp;
> >                 }
> >         }
> > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> >         if (xdp_prog) {
> >                 unsigned int xdp_frags_truesz = 0;
> >                 struct skb_shared_info *shinfo;
> > -               struct xdp_frame *xdpf;
> >                 struct page *xdp_page;
> >                 struct xdp_buff xdp;
> >                 void *data;
> > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> >                 if (unlikely(err))
> >                         goto err_xdp_frags;
> >
> > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > -               stats->xdp_packets++;
> > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> >
> >                 switch (act) {
> > -               case XDP_PASS:
> > +               case VIRTNET_XDP_RES_PASS:
> >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> >                         if (unlikely(!head_skb))
> >                                 goto err_xdp_frags;
> >
> >                         rcu_read_unlock();
> >                         return head_skb;
> > -               case XDP_TX:
> > -                       stats->xdp_tx++;
> > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > -                       if (unlikely(!xdpf)) {
> > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
>
> Nit: This debug is lost after the conversion.

Will fix.

Thanks.

>
> Thanks
>
> > -                               goto err_xdp_frags;
> > -                       }
> > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > -                       if (unlikely(!err)) {
> > -                               xdp_return_frame_rx_napi(xdpf);
> > -                       } else if (unlikely(err < 0)) {
> > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                               goto err_xdp_frags;
> > -                       }
> > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > -                       rcu_read_unlock();
> > -                       goto xdp_xmit;
> > -               case XDP_REDIRECT:
> > -                       stats->xdp_redirects++;
> > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > -                       if (err)
> > -                               goto err_xdp_frags;
> > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > +
> > +               case VIRTNET_XDP_RES_CONSUMED:
> >                         rcu_read_unlock();
> >                         goto xdp_xmit;
> > -               default:
> > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > -                       fallthrough;
> > -               case XDP_ABORTED:
> > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > -                       fallthrough;
> > -               case XDP_DROP:
> > +
> > +               case VIRTNET_XDP_RES_DROP:
> >                         goto err_xdp_frags;
> >                 }
> >  err_xdp_frags:
> > --
> > 2.32.0.3.g01195cf9f
> >
>
Jason Wang April 4, 2023, 5:04 a.m. UTC | #3
On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > At present, we have two similar logic to perform the XDP prog.
> > >
> > > Therefore, this PATCH separates the code of executing XDP, which is
> > > conducive to later maintenance.
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index bb426958cdd4..72b9d6ee4024 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > >         char padding[12];
> > >  };
> > >
> > > +enum {
> > > +       /* xdp pass */
> > > +       VIRTNET_XDP_RES_PASS,
> > > +       /* drop packet. the caller needs to release the page. */
> > > +       VIRTNET_XDP_RES_DROP,
> > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > +       VIRTNET_XDP_RES_CONSUMED,
> > > +};
> >
> > I'd prefer this to be done on top unless it is a must. But I don't see
> > any advantage of introducing this, it's partial mapping of XDP action
> > and it needs to be extended when XDP action is extended. (And we've
> > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
>
> No, these are the three states of buffer after XDP processing.
>
> * PASS: goto make skb

XDP_PASS goes for this.

> * DROP: we should release buffer

XDP_DROP and error conditions go with this.

> * CONSUMED: xdp prog used the buffer, we do nothing

XDP_TX/XDP_REDIRECTION goes for this.

So the virtnet_xdp_handler() just maps XDP ACTION plus the error
conditions to the above three states.

We can simply map error to XDP_DROP like:

       case XDP_TX:
              stats->xdp_tx++;
               xdpf = xdp_convert_buff_to_frame(xdp);
               if (unlikely(!xdpf))
                       return XDP_DROP;

A good side effect is to avoid the xdp_xmit pointer to be passed to
the function.

>
> The latter two are not particularly related to XDP ACTION. And it does not need
> to extend when XDP action is extended. At least I have not thought of this
> situation.

What's the advantages of such indirection compared to using XDP action directly?

Thanks

>
>
> >
> > > +
> > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >
> > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > >         return ret;
> > >  }
> > >
> > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > +                              struct net_device *dev,
> > > +                              unsigned int *xdp_xmit,
> > > +                              struct virtnet_rq_stats *stats)
> > > +{
> > > +       struct xdp_frame *xdpf;
> > > +       int err;
> > > +       u32 act;
> > > +
> > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > +       stats->xdp_packets++;
> > > +
> > > +       switch (act) {
> > > +       case XDP_PASS:
> > > +               return VIRTNET_XDP_RES_PASS;
> > > +
> > > +       case XDP_TX:
> > > +               stats->xdp_tx++;
> > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > +               if (unlikely(!xdpf))
> > > +                       return VIRTNET_XDP_RES_DROP;
> > > +
> > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > +               if (unlikely(!err)) {
> > > +                       xdp_return_frame_rx_napi(xdpf);
> > > +               } else if (unlikely(err < 0)) {
> > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > +                       return VIRTNET_XDP_RES_DROP;
> > > +               }
> > > +
> > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > +
> > > +       case XDP_REDIRECT:
> > > +               stats->xdp_redirects++;
> > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > +               if (err)
> > > +                       return VIRTNET_XDP_RES_DROP;
> > > +
> > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > +
> > > +       default:
> > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > +               fallthrough;
> > > +       case XDP_ABORTED:
> > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > +               fallthrough;
> > > +       case XDP_DROP:
> > > +               return VIRTNET_XDP_RES_DROP;
> > > +       }
> > > +}
> > > +
> > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > >  {
> > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > >         struct page *page = virt_to_head_page(buf);
> > >         unsigned int delta = 0;
> > >         struct page *xdp_page;
> > > -       int err;
> > >         unsigned int metasize = 0;
> > >
> > >         len -= vi->hdr_len;
> > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > >         if (xdp_prog) {
> > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > -               struct xdp_frame *xdpf;
> > >                 struct xdp_buff xdp;
> > >                 void *orig_data;
> > >                 u32 act;
> > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > >                                  xdp_headroom, len, true);
> > >                 orig_data = xdp.data;
> > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > -               stats->xdp_packets++;
> > > +
> > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > >
> > >                 switch (act) {
> > > -               case XDP_PASS:
> > > +               case VIRTNET_XDP_RES_PASS:
> > >                         /* Recalculate length in case bpf program changed it */
> > >                         delta = orig_data - xdp.data;
> > >                         len = xdp.data_end - xdp.data;
> > >                         metasize = xdp.data - xdp.data_meta;
> > >                         break;
> > > -               case XDP_TX:
> > > -                       stats->xdp_tx++;
> > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > -                       if (unlikely(!xdpf))
> > > -                               goto err_xdp;
> > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > -                       if (unlikely(!err)) {
> > > -                               xdp_return_frame_rx_napi(xdpf);
> > > -                       } else if (unlikely(err < 0)) {
> > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                               goto err_xdp;
> > > -                       }
> > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > -                       rcu_read_unlock();
> > > -                       goto xdp_xmit;
> > > -               case XDP_REDIRECT:
> > > -                       stats->xdp_redirects++;
> > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > -                       if (err)
> > > -                               goto err_xdp;
> > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > +
> > > +               case VIRTNET_XDP_RES_CONSUMED:
> > >                         rcu_read_unlock();
> > >                         goto xdp_xmit;
> > > -               default:
> > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > -                       fallthrough;
> > > -               case XDP_ABORTED:
> > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                       goto err_xdp;
> > > -               case XDP_DROP:
> > > +
> > > +               case VIRTNET_XDP_RES_DROP:
> > >                         goto err_xdp;
> > >                 }
> > >         }
> > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > >         if (xdp_prog) {
> > >                 unsigned int xdp_frags_truesz = 0;
> > >                 struct skb_shared_info *shinfo;
> > > -               struct xdp_frame *xdpf;
> > >                 struct page *xdp_page;
> > >                 struct xdp_buff xdp;
> > >                 void *data;
> > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > >                 if (unlikely(err))
> > >                         goto err_xdp_frags;
> > >
> > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > -               stats->xdp_packets++;
> > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > >
> > >                 switch (act) {
> > > -               case XDP_PASS:
> > > +               case VIRTNET_XDP_RES_PASS:
> > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > >                         if (unlikely(!head_skb))
> > >                                 goto err_xdp_frags;
> > >
> > >                         rcu_read_unlock();
> > >                         return head_skb;
> > > -               case XDP_TX:
> > > -                       stats->xdp_tx++;
> > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > -                       if (unlikely(!xdpf)) {
> > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> >
> > Nit: This debug is lost after the conversion.
>
> Will fix.
>
> Thanks.
>
> >
> > Thanks
> >
> > > -                               goto err_xdp_frags;
> > > -                       }
> > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > -                       if (unlikely(!err)) {
> > > -                               xdp_return_frame_rx_napi(xdpf);
> > > -                       } else if (unlikely(err < 0)) {
> > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                               goto err_xdp_frags;
> > > -                       }
> > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > -                       rcu_read_unlock();
> > > -                       goto xdp_xmit;
> > > -               case XDP_REDIRECT:
> > > -                       stats->xdp_redirects++;
> > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > -                       if (err)
> > > -                               goto err_xdp_frags;
> > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > +
> > > +               case VIRTNET_XDP_RES_CONSUMED:
> > >                         rcu_read_unlock();
> > >                         goto xdp_xmit;
> > > -               default:
> > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > -                       fallthrough;
> > > -               case XDP_ABORTED:
> > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > -                       fallthrough;
> > > -               case XDP_DROP:
> > > +
> > > +               case VIRTNET_XDP_RES_DROP:
> > >                         goto err_xdp_frags;
> > >                 }
> > >  err_xdp_frags:
> > > --
> > > 2.32.0.3.g01195cf9f
> > >
> >
>
Xuan Zhuo April 4, 2023, 6:11 a.m. UTC | #4
On Tue, 4 Apr 2023 13:04:02 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > At present, we have two similar logic to perform the XDP prog.
> > > >
> > > > Therefore, this PATCH separates the code of executing XDP, which is
> > > > conducive to later maintenance.
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index bb426958cdd4..72b9d6ee4024 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > > >         char padding[12];
> > > >  };
> > > >
> > > > +enum {
> > > > +       /* xdp pass */
> > > > +       VIRTNET_XDP_RES_PASS,
> > > > +       /* drop packet. the caller needs to release the page. */
> > > > +       VIRTNET_XDP_RES_DROP,
> > > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > > +       VIRTNET_XDP_RES_CONSUMED,
> > > > +};
> > >
> > > I'd prefer this to be done on top unless it is a must. But I don't see
> > > any advantage of introducing this, it's partial mapping of XDP action
> > > and it needs to be extended when XDP action is extended. (And we've
> > > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> >
> > No, these are the three states of buffer after XDP processing.
> >
> > * PASS: goto make skb
>
> XDP_PASS goes for this.
>
> > * DROP: we should release buffer
>
> XDP_DROP and error conditions go with this.
>
> > * CUNSUMED: xdp prog used the buffer, we do nothing
>
> XDP_TX/XDP_REDIRECTION goes for this.
>
> So t virtnet_xdp_handler() just maps XDP ACTION plus the error
> conditions to the above three states.
>
> We can simply map error to XDP_DROP like:
>
>        case XDP_TX:
>               stats->xdp_tx++;
>                xdpf = xdp_convert_buff_to_frame(xdp);
>                if (unlikely(!xdpf))
>                        return XDP_DROP;
>
> A good side effect is to avoid the xdp_xmit pointer to be passed to
> the function.


So, I guess you mean this:

	switch (act) {
	case XDP_PASS:
		/* handle pass */
		return skb;

	case XDP_TX:
		*xdp_xmit |= VIRTIO_XDP_TX;
		goto xmit;

	case XDP_REDIRECT:
		*xdp_xmit |= VIRTIO_XDP_REDIR;
		goto xmit;

	case XDP_DROP:
	default:
		goto err_xdp;
	}

I have to say there is no problem from the perspective of code implementation.

But if a new ACTION like XDP_TX or XDP_REDIRECT is added in the future, then
we must modify all the callers. This is the benefit of using CONSUMED.

I think it is a real advantage to put xdp_xmit in virtnet_xdp_handler(),
so that the caller does not have to care too much about these details. If you
are concerned about the increased number of parameters, I suggest putting it
in rq.

Thanks.



>
> >
> > The latter two are not particularly related to XDP ACTION. And it does not need
> > to extend when XDP action is extended. At least I have not thought of this
> > situation.
>
> What's the advantages of such indirection compared to using XDP action directly?
>
> Thanks
>
> >
> >
> > >
> > > > +
> > > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >
> > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > >         return ret;
> > > >  }
> > > >
> > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > +                              struct net_device *dev,
> > > > +                              unsigned int *xdp_xmit,
> > > > +                              struct virtnet_rq_stats *stats)
> > > > +{
> > > > +       struct xdp_frame *xdpf;
> > > > +       int err;
> > > > +       u32 act;
> > > > +
> > > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > > +       stats->xdp_packets++;
> > > > +
> > > > +       switch (act) {
> > > > +       case XDP_PASS:
> > > > +               return VIRTNET_XDP_RES_PASS;
> > > > +
> > > > +       case XDP_TX:
> > > > +               stats->xdp_tx++;
> > > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > > +               if (unlikely(!xdpf))
> > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > +
> > > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > +               if (unlikely(!err)) {
> > > > +                       xdp_return_frame_rx_napi(xdpf);
> > > > +               } else if (unlikely(err < 0)) {
> > > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > +               }
> > > > +
> > > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > +
> > > > +       case XDP_REDIRECT:
> > > > +               stats->xdp_redirects++;
> > > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > > +               if (err)
> > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > +
> > > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > +
> > > > +       default:
> > > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > > +               fallthrough;
> > > > +       case XDP_ABORTED:
> > > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > > +               fallthrough;
> > > > +       case XDP_DROP:
> > > > +               return VIRTNET_XDP_RES_DROP;
> > > > +       }
> > > > +}
> > > > +
> > > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > > >  {
> > > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > >         struct page *page = virt_to_head_page(buf);
> > > >         unsigned int delta = 0;
> > > >         struct page *xdp_page;
> > > > -       int err;
> > > >         unsigned int metasize = 0;
> > > >
> > > >         len -= vi->hdr_len;
> > > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > > >         if (xdp_prog) {
> > > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > > -               struct xdp_frame *xdpf;
> > > >                 struct xdp_buff xdp;
> > > >                 void *orig_data;
> > > >                 u32 act;
> > > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > >                                  xdp_headroom, len, true);
> > > >                 orig_data = xdp.data;
> > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > -               stats->xdp_packets++;
> > > > +
> > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > >
> > > >                 switch (act) {
> > > > -               case XDP_PASS:
> > > > +               case VIRTNET_XDP_RES_PASS:
> > > >                         /* Recalculate length in case bpf program changed it */
> > > >                         delta = orig_data - xdp.data;
> > > >                         len = xdp.data_end - xdp.data;
> > > >                         metasize = xdp.data - xdp.data_meta;
> > > >                         break;
> > > > -               case XDP_TX:
> > > > -                       stats->xdp_tx++;
> > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > -                       if (unlikely(!xdpf))
> > > > -                               goto err_xdp;
> > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > -                       if (unlikely(!err)) {
> > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > -                       } else if (unlikely(err < 0)) {
> > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > -                               goto err_xdp;
> > > > -                       }
> > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > -                       rcu_read_unlock();
> > > > -                       goto xdp_xmit;
> > > > -               case XDP_REDIRECT:
> > > > -                       stats->xdp_redirects++;
> > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > -                       if (err)
> > > > -                               goto err_xdp;
> > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > +
> > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > >                         rcu_read_unlock();
> > > >                         goto xdp_xmit;
> > > > -               default:
> > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > -                       fallthrough;
> > > > -               case XDP_ABORTED:
> > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > -                       goto err_xdp;
> > > > -               case XDP_DROP:
> > > > +
> > > > +               case VIRTNET_XDP_RES_DROP:
> > > >                         goto err_xdp;
> > > >                 }
> > > >         }
> > > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > >         if (xdp_prog) {
> > > >                 unsigned int xdp_frags_truesz = 0;
> > > >                 struct skb_shared_info *shinfo;
> > > > -               struct xdp_frame *xdpf;
> > > >                 struct page *xdp_page;
> > > >                 struct xdp_buff xdp;
> > > >                 void *data;
> > > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > >                 if (unlikely(err))
> > > >                         goto err_xdp_frags;
> > > >
> > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > -               stats->xdp_packets++;
> > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > >
> > > >                 switch (act) {
> > > > -               case XDP_PASS:
> > > > +               case VIRTNET_XDP_RES_PASS:
> > > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > > >                         if (unlikely(!head_skb))
> > > >                                 goto err_xdp_frags;
> > > >
> > > >                         rcu_read_unlock();
> > > >                         return head_skb;
> > > > -               case XDP_TX:
> > > > -                       stats->xdp_tx++;
> > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > -                       if (unlikely(!xdpf)) {
> > > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> > >
> > > Nit: This debug is lost after the conversion.
> >
> > Will fix.
> >
> > Thanks.
> >
> > >
> > > Thanks
> > >
> > > > -                               goto err_xdp_frags;
> > > > -                       }
> > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > -                       if (unlikely(!err)) {
> > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > -                       } else if (unlikely(err < 0)) {
> > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > -                               goto err_xdp_frags;
> > > > -                       }
> > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > -                       rcu_read_unlock();
> > > > -                       goto xdp_xmit;
> > > > -               case XDP_REDIRECT:
> > > > -                       stats->xdp_redirects++;
> > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > -                       if (err)
> > > > -                               goto err_xdp_frags;
> > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > +
> > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > >                         rcu_read_unlock();
> > > >                         goto xdp_xmit;
> > > > -               default:
> > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > -                       fallthrough;
> > > > -               case XDP_ABORTED:
> > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > -                       fallthrough;
> > > > -               case XDP_DROP:
> > > > +
> > > > +               case VIRTNET_XDP_RES_DROP:
> > > >                         goto err_xdp_frags;
> > > >                 }
> > > >  err_xdp_frags:
> > > > --
> > > > 2.32.0.3.g01195cf9f
> > > >
> > >
> >
>
Jason Wang April 4, 2023, 6:35 a.m. UTC | #5
On Tue, Apr 4, 2023 at 2:22 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Tue, 4 Apr 2023 13:04:02 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > >
> > > > > At present, we have two similar logic to perform the XDP prog.
> > > > >
> > > > > Therefore, this PATCH separates the code of executing XDP, which is
> > > > > conducive to later maintenance.
> > > > >
> > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > ---
> > > > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > > > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > > > >
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index bb426958cdd4..72b9d6ee4024 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > > > >         char padding[12];
> > > > >  };
> > > > >
> > > > > +enum {
> > > > > +       /* xdp pass */
> > > > > +       VIRTNET_XDP_RES_PASS,
> > > > > +       /* drop packet. the caller needs to release the page. */
> > > > > +       VIRTNET_XDP_RES_DROP,
> > > > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > > > +       VIRTNET_XDP_RES_CONSUMED,
> > > > > +};
> > > >
> > > > I'd prefer this to be done on top unless it is a must. But I don't see
> > > > any advantage of introducing this, it's partial mapping of XDP action
> > > > and it needs to be extended when XDP action is extended. (And we've
> > > > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> > >
> > > No, these are the three states of buffer after XDP processing.
> > >
> > > * PASS: goto make skb
> >
> > XDP_PASS goes for this.
> >
> > > * DROP: we should release buffer
> >
> > XDP_DROP and error conditions go with this.
> >
> > > * CUNSUMED: xdp prog used the buffer, we do nothing
> >
> > XDP_TX/XDP_REDIRECTION goes for this.
> >
> > So t virtnet_xdp_handler() just maps XDP ACTION plus the error
> > conditions to the above three states.
> >
> > We can simply map error to XDP_DROP like:
> >
> >        case XDP_TX:
> >               stats->xdp_tx++;
> >                xdpf = xdp_convert_buff_to_frame(xdp);
> >                if (unlikely(!xdpf))
> >                        return XDP_DROP;
> >
> > A good side effect is to avoid the xdp_xmit pointer to be passed to
> > the function.
>
>
> So, I guess you mean this:
>
>         switch (act) {
>         case XDP_PASS:
>                 /* handle pass */
>                 return skb;
>
>         case XDP_TX:
>                 *xdp_xmit |= VIRTIO_XDP_TX;
>                 goto xmit;
>
>         case XDP_REDIRECT:
>                 *xdp_xmit |= VIRTIO_XDP_REDIR;
>                 goto xmit;
>
>         case XDP_DROP:
>         default:
>                 goto err_xdp;
>         }
>
> I have to say there is no problem from the perspective of code implementation.

Note that this is the current logic where it is determined in
receive_small() and receive_mergeable().

>
> But if the a new ACTION liking XDP_TX,XDP_REDIRECT is added in the future, then
> we must modify all the callers.

This is fine since we only use a single type for XDP action.

> This is the benefit of using CUNSUMED.

It's very hard to say, e.g if we want to support cloning in the future.

>
> I think it is a good advantage to put xdp_xmit in virtnet_xdp_handler(),
> which makes the caller not care too much about these details.

This part I don't understand, having xdp_xmit means the caller need to
know whether it is xmited or redirected. The point of the enum is to
hide the XDP actions, but it's conflict with what xdp_xmit who want to
expose (part of) the XDP actions.

> If you take into
> account the problem of increasing the number of parameters, I advise to put it
> in rq.

I don't have strong opinion to introduce the enum, what I want to say
is, use a separated patch to do that.

Thanks

>
> Thanks.
>
>
>
> >
> > >
> > > The latter two are not particularly related to XDP ACTION. And it does not need
> > > to extend when XDP action is extended. At least I have not thought of this
> > > situation.
> >
> > What's the advantages of such indirection compared to using XDP action directly?
> >
> > Thanks
> >
> > >
> > >
> > > >
> > > > > +
> > > > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > >
> > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > >         return ret;
> > > > >  }
> > > > >
> > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > +                              struct net_device *dev,
> > > > > +                              unsigned int *xdp_xmit,
> > > > > +                              struct virtnet_rq_stats *stats)
> > > > > +{
> > > > > +       struct xdp_frame *xdpf;
> > > > > +       int err;
> > > > > +       u32 act;
> > > > > +
> > > > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > > > +       stats->xdp_packets++;
> > > > > +
> > > > > +       switch (act) {
> > > > > +       case XDP_PASS:
> > > > > +               return VIRTNET_XDP_RES_PASS;
> > > > > +
> > > > > +       case XDP_TX:
> > > > > +               stats->xdp_tx++;
> > > > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > +               if (unlikely(!xdpf))
> > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > +
> > > > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > +               if (unlikely(!err)) {
> > > > > +                       xdp_return_frame_rx_napi(xdpf);
> > > > > +               } else if (unlikely(err < 0)) {
> > > > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > +               }
> > > > > +
> > > > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > +
> > > > > +       case XDP_REDIRECT:
> > > > > +               stats->xdp_redirects++;
> > > > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > > > +               if (err)
> > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > +
> > > > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > +
> > > > > +       default:
> > > > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > > > +               fallthrough;
> > > > > +       case XDP_ABORTED:
> > > > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > > > +               fallthrough;
> > > > > +       case XDP_DROP:
> > > > > +               return VIRTNET_XDP_RES_DROP;
> > > > > +       }
> > > > > +}
> > > > > +
> > > > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > > > >  {
> > > > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > >         struct page *page = virt_to_head_page(buf);
> > > > >         unsigned int delta = 0;
> > > > >         struct page *xdp_page;
> > > > > -       int err;
> > > > >         unsigned int metasize = 0;
> > > > >
> > > > >         len -= vi->hdr_len;
> > > > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > > > >         if (xdp_prog) {
> > > > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > > > -               struct xdp_frame *xdpf;
> > > > >                 struct xdp_buff xdp;
> > > > >                 void *orig_data;
> > > > >                 u32 act;
> > > > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > > >                                  xdp_headroom, len, true);
> > > > >                 orig_data = xdp.data;
> > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > -               stats->xdp_packets++;
> > > > > +
> > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > >
> > > > >                 switch (act) {
> > > > > -               case XDP_PASS:
> > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > >                         /* Recalculate length in case bpf program changed it */
> > > > >                         delta = orig_data - xdp.data;
> > > > >                         len = xdp.data_end - xdp.data;
> > > > >                         metasize = xdp.data - xdp.data_meta;
> > > > >                         break;
> > > > > -               case XDP_TX:
> > > > > -                       stats->xdp_tx++;
> > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > -                       if (unlikely(!xdpf))
> > > > > -                               goto err_xdp;
> > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > -                       if (unlikely(!err)) {
> > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > -                       } else if (unlikely(err < 0)) {
> > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > -                               goto err_xdp;
> > > > > -                       }
> > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > -                       rcu_read_unlock();
> > > > > -                       goto xdp_xmit;
> > > > > -               case XDP_REDIRECT:
> > > > > -                       stats->xdp_redirects++;
> > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > -                       if (err)
> > > > > -                               goto err_xdp;
> > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > +
> > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > >                         rcu_read_unlock();
> > > > >                         goto xdp_xmit;
> > > > > -               default:
> > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > -                       fallthrough;
> > > > > -               case XDP_ABORTED:
> > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > -                       goto err_xdp;
> > > > > -               case XDP_DROP:
> > > > > +
> > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > >                         goto err_xdp;
> > > > >                 }
> > > > >         }
> > > > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > >         if (xdp_prog) {
> > > > >                 unsigned int xdp_frags_truesz = 0;
> > > > >                 struct skb_shared_info *shinfo;
> > > > > -               struct xdp_frame *xdpf;
> > > > >                 struct page *xdp_page;
> > > > >                 struct xdp_buff xdp;
> > > > >                 void *data;
> > > > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > >                 if (unlikely(err))
> > > > >                         goto err_xdp_frags;
> > > > >
> > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > -               stats->xdp_packets++;
> > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > >
> > > > >                 switch (act) {
> > > > > -               case XDP_PASS:
> > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > > > >                         if (unlikely(!head_skb))
> > > > >                                 goto err_xdp_frags;
> > > > >
> > > > >                         rcu_read_unlock();
> > > > >                         return head_skb;
> > > > > -               case XDP_TX:
> > > > > -                       stats->xdp_tx++;
> > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > -                       if (unlikely(!xdpf)) {
> > > > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> > > >
> > > > Nit: This debug is lost after the conversion.
> > >
> > > Will fix.
> > >
> > > Thanks.
> > >
> > > >
> > > > Thanks
> > > >
> > > > > -                               goto err_xdp_frags;
> > > > > -                       }
> > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > -                       if (unlikely(!err)) {
> > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > -                       } else if (unlikely(err < 0)) {
> > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > -                               goto err_xdp_frags;
> > > > > -                       }
> > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > -                       rcu_read_unlock();
> > > > > -                       goto xdp_xmit;
> > > > > -               case XDP_REDIRECT:
> > > > > -                       stats->xdp_redirects++;
> > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > -                       if (err)
> > > > > -                               goto err_xdp_frags;
> > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > +
> > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > >                         rcu_read_unlock();
> > > > >                         goto xdp_xmit;
> > > > > -               default:
> > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > -                       fallthrough;
> > > > > -               case XDP_ABORTED:
> > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > -                       fallthrough;
> > > > > -               case XDP_DROP:
> > > > > +
> > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > >                         goto err_xdp_frags;
> > > > >                 }
> > > > >  err_xdp_frags:
> > > > > --
> > > > > 2.32.0.3.g01195cf9f
> > > > >
> > > >
> > >
> >
>
Xuan Zhuo April 4, 2023, 6:44 a.m. UTC | #6
On Tue, 4 Apr 2023 14:35:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Apr 4, 2023 at 2:22 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Tue, 4 Apr 2023 13:04:02 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > >
> > > > > > At present, we have two similar logic to perform the XDP prog.
> > > > > >
> > > > > > Therefore, this PATCH separates the code of executing XDP, which is
> > > > > > conducive to later maintenance.
> > > > > >
> > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > ---
> > > > > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > > > > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > > > > >
> > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > index bb426958cdd4..72b9d6ee4024 100644
> > > > > > --- a/drivers/net/virtio_net.c
> > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > > > > >         char padding[12];
> > > > > >  };
> > > > > >
> > > > > > +enum {
> > > > > > +       /* xdp pass */
> > > > > > +       VIRTNET_XDP_RES_PASS,
> > > > > > +       /* drop packet. the caller needs to release the page. */
> > > > > > +       VIRTNET_XDP_RES_DROP,
> > > > > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > > > > +       VIRTNET_XDP_RES_CONSUMED,
> > > > > > +};
> > > > >
> > > > > I'd prefer this to be done on top unless it is a must. But I don't see
> > > > > any advantage of introducing this, it's partial mapping of XDP action
> > > > > and it needs to be extended when XDP action is extended. (And we've
> > > > > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> > > >
> > > > No, these are the three states of buffer after XDP processing.
> > > >
> > > > * PASS: goto make skb
> > >
> > > XDP_PASS goes for this.
> > >
> > > > * DROP: we should release buffer
> > >
> > > XDP_DROP and error conditions go with this.
> > >
> > > > * CUNSUMED: xdp prog used the buffer, we do nothing
> > >
> > > XDP_TX/XDP_REDIRECTION goes for this.
> > >
> > > So t virtnet_xdp_handler() just maps XDP ACTION plus the error
> > > conditions to the above three states.
> > >
> > > We can simply map error to XDP_DROP like:
> > >
> > >        case XDP_TX:
> > >               stats->xdp_tx++;
> > >                xdpf = xdp_convert_buff_to_frame(xdp);
> > >                if (unlikely(!xdpf))
> > >                        return XDP_DROP;
> > >
> > > A good side effect is to avoid the xdp_xmit pointer to be passed to
> > > the function.
> >
> >
> > So, I guess you mean this:
> >
> >         switch (act) {
> >         case XDP_PASS:
> >                 /* handle pass */
> >                 return skb;
> >
> >         case XDP_TX:
> >                 *xdp_xmit |= VIRTIO_XDP_TX;
> >                 goto xmit;
> >
> >         case XDP_REDIRECT:
> >                 *xdp_xmit |= VIRTIO_XDP_REDIR;
> >                 goto xmit;
> >
> >         case XDP_DROP:
> >         default:
> >                 goto err_xdp;
> >         }
> >
> > I have to say there is no problem from the perspective of code implementation.
>
> Note that this is the current logic where it is determined in
> receive_small() and receive_mergeable().

Yes, but the purpose of these patches is to simplify the call.

>
> >
> > But if the a new ACTION liking XDP_TX,XDP_REDIRECT is added in the future, then
> > we must modify all the callers.
>
> This is fine since we only use a single type for XDP action.

a single type?

>
> > This is the benefit of using CUNSUMED.
>
> It's very hard to say, e.g if we want to support cloning in the future.

Cloning? You mean cloning into a new buffer?

It is true that no matter what the implementation is, the logic must be modified.

>
> >
> > I think it is a good advantage to put xdp_xmit in virtnet_xdp_handler(),
> > which makes the caller not care too much about these details.
>
> This part I don't understand, having xdp_xmit means the caller need to
> know whether it is xmited or redirected. The point of the enum is to
> hide the XDP actions, but it's conflict with what xdp_xmit who want to
> expose (part of) the XDP actions.

I mean, no matter what virtnet_xdp_handler() returns — an XDP_ACTION or one of
the values I defined — I want to hide the modification of xdp_xmit inside
virtnet_xdp_handler().

Even if virtnet_xdp_handler() returns XDP_TX, we can still complete the
modification of xdp_xmit within virtnet_xdp_handler().


>
> > If you take into
> > account the problem of increasing the number of parameters, I advise to put it
> > in rq.
>
> I don't have strong opinion to introduce the enum,

OK, I will drop these new enums.

> what I want to say
> is, use a separated patch to do that.

Does this part refer to putting xdp_xmit in rq?

Thanks.


>
> Thanks
>
> >
> > Thanks.
> >
> >
> >
> > >
> > > >
> > > > The latter two are not particularly related to XDP ACTION. And it does not need
> > > > to extend when XDP action is extended. At least I have not thought of this
> > > > situation.
> > >
> > > What's the advantages of such indirection compared to using XDP action directly?
> > >
> > > Thanks
> > >
> > > >
> > > >
> > > > >
> > > > > > +
> > > > > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > >
> > > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > > >         return ret;
> > > > > >  }
> > > > > >
> > > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > > +                              struct net_device *dev,
> > > > > > +                              unsigned int *xdp_xmit,
> > > > > > +                              struct virtnet_rq_stats *stats)
> > > > > > +{
> > > > > > +       struct xdp_frame *xdpf;
> > > > > > +       int err;
> > > > > > +       u32 act;
> > > > > > +
> > > > > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > > > > +       stats->xdp_packets++;
> > > > > > +
> > > > > > +       switch (act) {
> > > > > > +       case XDP_PASS:
> > > > > > +               return VIRTNET_XDP_RES_PASS;
> > > > > > +
> > > > > > +       case XDP_TX:
> > > > > > +               stats->xdp_tx++;
> > > > > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > > +               if (unlikely(!xdpf))
> > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > +
> > > > > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > +               if (unlikely(!err)) {
> > > > > > +                       xdp_return_frame_rx_napi(xdpf);
> > > > > > +               } else if (unlikely(err < 0)) {
> > > > > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > +               }
> > > > > > +
> > > > > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > +
> > > > > > +       case XDP_REDIRECT:
> > > > > > +               stats->xdp_redirects++;
> > > > > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > > > > +               if (err)
> > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > +
> > > > > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > +
> > > > > > +       default:
> > > > > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > > > > +               fallthrough;
> > > > > > +       case XDP_ABORTED:
> > > > > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > > > > +               fallthrough;
> > > > > > +       case XDP_DROP:
> > > > > > +               return VIRTNET_XDP_RES_DROP;
> > > > > > +       }
> > > > > > +}
> > > > > > +
> > > > > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > > > > >  {
> > > > > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > > > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > >         struct page *page = virt_to_head_page(buf);
> > > > > >         unsigned int delta = 0;
> > > > > >         struct page *xdp_page;
> > > > > > -       int err;
> > > > > >         unsigned int metasize = 0;
> > > > > >
> > > > > >         len -= vi->hdr_len;
> > > > > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > > > > >         if (xdp_prog) {
> > > > > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > > > > -               struct xdp_frame *xdpf;
> > > > > >                 struct xdp_buff xdp;
> > > > > >                 void *orig_data;
> > > > > >                 u32 act;
> > > > > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > > > >                                  xdp_headroom, len, true);
> > > > > >                 orig_data = xdp.data;
> > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > -               stats->xdp_packets++;
> > > > > > +
> > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > >
> > > > > >                 switch (act) {
> > > > > > -               case XDP_PASS:
> > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > >                         /* Recalculate length in case bpf program changed it */
> > > > > >                         delta = orig_data - xdp.data;
> > > > > >                         len = xdp.data_end - xdp.data;
> > > > > >                         metasize = xdp.data - xdp.data_meta;
> > > > > >                         break;
> > > > > > -               case XDP_TX:
> > > > > > -                       stats->xdp_tx++;
> > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > -                       if (unlikely(!xdpf))
> > > > > > -                               goto err_xdp;
> > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > -                       if (unlikely(!err)) {
> > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > -                               goto err_xdp;
> > > > > > -                       }
> > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > -                       rcu_read_unlock();
> > > > > > -                       goto xdp_xmit;
> > > > > > -               case XDP_REDIRECT:
> > > > > > -                       stats->xdp_redirects++;
> > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > -                       if (err)
> > > > > > -                               goto err_xdp;
> > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > +
> > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > >                         rcu_read_unlock();
> > > > > >                         goto xdp_xmit;
> > > > > > -               default:
> > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > -                       fallthrough;
> > > > > > -               case XDP_ABORTED:
> > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > -                       goto err_xdp;
> > > > > > -               case XDP_DROP:
> > > > > > +
> > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > >                         goto err_xdp;
> > > > > >                 }
> > > > > >         }
> > > > > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > >         if (xdp_prog) {
> > > > > >                 unsigned int xdp_frags_truesz = 0;
> > > > > >                 struct skb_shared_info *shinfo;
> > > > > > -               struct xdp_frame *xdpf;
> > > > > >                 struct page *xdp_page;
> > > > > >                 struct xdp_buff xdp;
> > > > > >                 void *data;
> > > > > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > >                 if (unlikely(err))
> > > > > >                         goto err_xdp_frags;
> > > > > >
> > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > -               stats->xdp_packets++;
> > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > >
> > > > > >                 switch (act) {
> > > > > > -               case XDP_PASS:
> > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > > > > >                         if (unlikely(!head_skb))
> > > > > >                                 goto err_xdp_frags;
> > > > > >
> > > > > >                         rcu_read_unlock();
> > > > > >                         return head_skb;
> > > > > > -               case XDP_TX:
> > > > > > -                       stats->xdp_tx++;
> > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > -                       if (unlikely(!xdpf)) {
> > > > > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> > > > >
> > > > > Nit: This debug is lost after the conversion.
> > > >
> > > > Will fix.
> > > >
> > > > Thanks.
> > > >
> > > > >
> > > > > Thanks
> > > > >
> > > > > > -                               goto err_xdp_frags;
> > > > > > -                       }
> > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > -                       if (unlikely(!err)) {
> > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > -                               goto err_xdp_frags;
> > > > > > -                       }
> > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > -                       rcu_read_unlock();
> > > > > > -                       goto xdp_xmit;
> > > > > > -               case XDP_REDIRECT:
> > > > > > -                       stats->xdp_redirects++;
> > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > -                       if (err)
> > > > > > -                               goto err_xdp_frags;
> > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > +
> > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > >                         rcu_read_unlock();
> > > > > >                         goto xdp_xmit;
> > > > > > -               default:
> > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > -                       fallthrough;
> > > > > > -               case XDP_ABORTED:
> > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > -                       fallthrough;
> > > > > > -               case XDP_DROP:
> > > > > > +
> > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > >                         goto err_xdp_frags;
> > > > > >                 }
> > > > > >  err_xdp_frags:
> > > > > > --
> > > > > > 2.32.0.3.g01195cf9f
> > > > > >
> > > > >
> > > >
> > >
> >
>
Jason Wang April 4, 2023, 7:01 a.m. UTC | #7
On Tue, Apr 4, 2023 at 2:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Tue, 4 Apr 2023 14:35:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Tue, Apr 4, 2023 at 2:22 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > On Tue, 4 Apr 2023 13:04:02 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > >
> > > > > On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > > >
> > > > > > > At present, we have two similar logic to perform the XDP prog.
> > > > > > >
> > > > > > > Therefore, this PATCH separates the code of executing XDP, which is
> > > > > > > conducive to later maintenance.
> > > > > > >
> > > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > > ---
> > > > > > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > > > > > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > > > > > >
> > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > > index bb426958cdd4..72b9d6ee4024 100644
> > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > > > > > >         char padding[12];
> > > > > > >  };
> > > > > > >
> > > > > > > +enum {
> > > > > > > +       /* xdp pass */
> > > > > > > +       VIRTNET_XDP_RES_PASS,
> > > > > > > +       /* drop packet. the caller needs to release the page. */
> > > > > > > +       VIRTNET_XDP_RES_DROP,
> > > > > > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > > > > > +       VIRTNET_XDP_RES_CONSUMED,
> > > > > > > +};
> > > > > >
> > > > > > I'd prefer this to be done on top unless it is a must. But I don't see
> > > > > > any advantage of introducing this, it's partial mapping of XDP action
> > > > > > and it needs to be extended when XDP action is extended. (And we've
> > > > > > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> > > > >
> > > > > No, these are the three states of buffer after XDP processing.
> > > > >
> > > > > * PASS: goto make skb
> > > >
> > > > XDP_PASS goes for this.
> > > >
> > > > > * DROP: we should release buffer
> > > >
> > > > XDP_DROP and error conditions go with this.
> > > >
> > > > > * CUNSUMED: xdp prog used the buffer, we do nothing
> > > >
> > > > XDP_TX/XDP_REDIRECTION goes for this.
> > > >
> > > > So t virtnet_xdp_handler() just maps XDP ACTION plus the error
> > > > conditions to the above three states.
> > > >
> > > > We can simply map error to XDP_DROP like:
> > > >
> > > >        case XDP_TX:
> > > >               stats->xdp_tx++;
> > > >                xdpf = xdp_convert_buff_to_frame(xdp);
> > > >                if (unlikely(!xdpf))
> > > >                        return XDP_DROP;
> > > >
> > > > A good side effect is to avoid the xdp_xmit pointer to be passed to
> > > > the function.
> > >
> > >
> > > So, I guess you mean this:
> > >
> > >         switch (act) {
> > >         case XDP_PASS:
> > >                 /* handle pass */
> > >                 return skb;
> > >
> > >         case XDP_TX:
> > >                 *xdp_xmit |= VIRTIO_XDP_TX;
> > >                 goto xmit;
> > >
> > >         case XDP_REDIRECT:
> > >                 *xdp_xmit |= VIRTIO_XDP_REDIR;
> > >                 goto xmit;
> > >
> > >         case XDP_DROP:
> > >         default:
> > >                 goto err_xdp;
> > >         }
> > >
> > > I have to say there is no problem from the perspective of code implementation.
> >
> > Note that this is the current logic where it is determined in
> > receive_small() and receive_mergeable().
>
> Yes, but the purpose of this patches is to simplify the call.

You mean simplify the receive_small()/mergeable()?

>
> >
> > >
> > > But if the a new ACTION liking XDP_TX,XDP_REDIRECT is added in the future, then
> > > we must modify all the callers.
> >
> > This is fine since we only use a single type for XDP action.
>
> a single type?

Instead of (partial) duplicating XDP actions in the new enums.

>
> >
> > > This is the benefit of using CUNSUMED.
> >
> > It's very hard to say, e.g if we want to support cloning in the future.
>
> cloning? You mean clone one new buffer.
>
> It is true that no matter what realization, the logic must be modified.

Yes.

>
> >
> > >
> > > I think it is a good advantage to put xdp_xmit in virtnet_xdp_handler(),
> > > which makes the caller not care too much about these details.
> >
> > This part I don't understand, having xdp_xmit means the caller need to
> > know whether it is xmited or redirected. The point of the enum is to
> > hide the XDP actions, but it's conflict with what xdp_xmit who want to
> > expose (part of) the XDP actions.
>
> I mean, no matter what virtnet_xdp_handler () returns? XDP_ACTION or some one I
> defined, I want to hide the modification of xdp_xmit to virtnet_xdp_handler().
>
> Even if virtnet_xdp_handler() returns XDP_TX, we can also complete the
> modification of XDP_XMIT within Virtnet_xdp_handler().
>
>
> >
> > > If you take into
> > > account the problem of increasing the number of parameters, I advise to put it
> > > in rq.
> >
> > I don't have strong opinion to introduce the enum,
>
> OK, I will drop these new enums.

Just to make sure we are on the same page. I mean, if there is no
objection from others, I'm ok to have an enum, but we need to use a
separate patch to do that.

>
> > what I want to say
> > is, use a separated patch to do that.
>
> Does this part refer to putting xdp_xmit in rq?

I mean it's better to be done separately. But I don't see the
advantage of this other than reducing the parameters.

Thanks

>
> Thanks.
>
>
> >
> > Thanks
> >
> > >
> > > Thanks.
> > >
> > >
> > >
> > > >
> > > > >
> > > > > The latter two are not particularly related to XDP ACTION. And it does not need
> > > > > to extend when XDP action is extended. At least I have not thought of this
> > > > > situation.
> > > >
> > > > What's the advantages of such indirection compared to using XDP action directly?
> > > >
> > > > Thanks
> > > >
> > > > >
> > > > >
> > > > > >
> > > > > > > +
> > > > > > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > >
> > > > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > > > >         return ret;
> > > > > > >  }
> > > > > > >
> > > > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > > > +                              struct net_device *dev,
> > > > > > > +                              unsigned int *xdp_xmit,
> > > > > > > +                              struct virtnet_rq_stats *stats)
> > > > > > > +{
> > > > > > > +       struct xdp_frame *xdpf;
> > > > > > > +       int err;
> > > > > > > +       u32 act;
> > > > > > > +
> > > > > > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > > > > > +       stats->xdp_packets++;
> > > > > > > +
> > > > > > > +       switch (act) {
> > > > > > > +       case XDP_PASS:
> > > > > > > +               return VIRTNET_XDP_RES_PASS;
> > > > > > > +
> > > > > > > +       case XDP_TX:
> > > > > > > +               stats->xdp_tx++;
> > > > > > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > > > +               if (unlikely(!xdpf))
> > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > +
> > > > > > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > +               if (unlikely(!err)) {
> > > > > > > +                       xdp_return_frame_rx_napi(xdpf);
> > > > > > > +               } else if (unlikely(err < 0)) {
> > > > > > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > +               }
> > > > > > > +
> > > > > > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > +
> > > > > > > +       case XDP_REDIRECT:
> > > > > > > +               stats->xdp_redirects++;
> > > > > > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > > > > > +               if (err)
> > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > +
> > > > > > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > +
> > > > > > > +       default:
> > > > > > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > > > > > +               fallthrough;
> > > > > > > +       case XDP_ABORTED:
> > > > > > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > +               fallthrough;
> > > > > > > +       case XDP_DROP:
> > > > > > > +               return VIRTNET_XDP_RES_DROP;
> > > > > > > +       }
> > > > > > > +}
> > > > > > > +
> > > > > > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > > > > > >  {
> > > > > > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > > > > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > >         struct page *page = virt_to_head_page(buf);
> > > > > > >         unsigned int delta = 0;
> > > > > > >         struct page *xdp_page;
> > > > > > > -       int err;
> > > > > > >         unsigned int metasize = 0;
> > > > > > >
> > > > > > >         len -= vi->hdr_len;
> > > > > > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > > > > > >         if (xdp_prog) {
> > > > > > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > > > > > -               struct xdp_frame *xdpf;
> > > > > > >                 struct xdp_buff xdp;
> > > > > > >                 void *orig_data;
> > > > > > >                 u32 act;
> > > > > > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > > > > >                                  xdp_headroom, len, true);
> > > > > > >                 orig_data = xdp.data;
> > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > -               stats->xdp_packets++;
> > > > > > > +
> > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > >
> > > > > > >                 switch (act) {
> > > > > > > -               case XDP_PASS:
> > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > >                         /* Recalculate length in case bpf program changed it */
> > > > > > >                         delta = orig_data - xdp.data;
> > > > > > >                         len = xdp.data_end - xdp.data;
> > > > > > >                         metasize = xdp.data - xdp.data_meta;
> > > > > > >                         break;
> > > > > > > -               case XDP_TX:
> > > > > > > -                       stats->xdp_tx++;
> > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > -                       if (unlikely(!xdpf))
> > > > > > > -                               goto err_xdp;
> > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > -                       if (unlikely(!err)) {
> > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > -                               goto err_xdp;
> > > > > > > -                       }
> > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > -                       rcu_read_unlock();
> > > > > > > -                       goto xdp_xmit;
> > > > > > > -               case XDP_REDIRECT:
> > > > > > > -                       stats->xdp_redirects++;
> > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > -                       if (err)
> > > > > > > -                               goto err_xdp;
> > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > +
> > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > >                         rcu_read_unlock();
> > > > > > >                         goto xdp_xmit;
> > > > > > > -               default:
> > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > -                       fallthrough;
> > > > > > > -               case XDP_ABORTED:
> > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > -                       goto err_xdp;
> > > > > > > -               case XDP_DROP:
> > > > > > > +
> > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > >                         goto err_xdp;
> > > > > > >                 }
> > > > > > >         }
> > > > > > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > >         if (xdp_prog) {
> > > > > > >                 unsigned int xdp_frags_truesz = 0;
> > > > > > >                 struct skb_shared_info *shinfo;
> > > > > > > -               struct xdp_frame *xdpf;
> > > > > > >                 struct page *xdp_page;
> > > > > > >                 struct xdp_buff xdp;
> > > > > > >                 void *data;
> > > > > > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > >                 if (unlikely(err))
> > > > > > >                         goto err_xdp_frags;
> > > > > > >
> > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > -               stats->xdp_packets++;
> > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > >
> > > > > > >                 switch (act) {
> > > > > > > -               case XDP_PASS:
> > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > > > > > >                         if (unlikely(!head_skb))
> > > > > > >                                 goto err_xdp_frags;
> > > > > > >
> > > > > > >                         rcu_read_unlock();
> > > > > > >                         return head_skb;
> > > > > > > -               case XDP_TX:
> > > > > > > -                       stats->xdp_tx++;
> > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > -                       if (unlikely(!xdpf)) {
> > > > > > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> > > > > >
> > > > > > Nit: This debug is lost after the conversion.
> > > > >
> > > > > Will fix.
> > > > >
> > > > > Thanks.
> > > > >
> > > > > >
> > > > > > Thanks
> > > > > >
> > > > > > > -                               goto err_xdp_frags;
> > > > > > > -                       }
> > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > -                       if (unlikely(!err)) {
> > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > -                               goto err_xdp_frags;
> > > > > > > -                       }
> > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > -                       rcu_read_unlock();
> > > > > > > -                       goto xdp_xmit;
> > > > > > > -               case XDP_REDIRECT:
> > > > > > > -                       stats->xdp_redirects++;
> > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > -                       if (err)
> > > > > > > -                               goto err_xdp_frags;
> > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > +
> > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > >                         rcu_read_unlock();
> > > > > > >                         goto xdp_xmit;
> > > > > > > -               default:
> > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > -                       fallthrough;
> > > > > > > -               case XDP_ABORTED:
> > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > -                       fallthrough;
> > > > > > > -               case XDP_DROP:
> > > > > > > +
> > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > >                         goto err_xdp_frags;
> > > > > > >                 }
> > > > > > >  err_xdp_frags:
> > > > > > > --
> > > > > > > 2.32.0.3.g01195cf9f
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
>
Xuan Zhuo April 4, 2023, 7:06 a.m. UTC | #8
On Tue, 4 Apr 2023 15:01:36 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Apr 4, 2023 at 2:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Tue, 4 Apr 2023 14:35:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Tue, Apr 4, 2023 at 2:22 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > On Tue, 4 Apr 2023 13:04:02 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > >
> > > > > > On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > > > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > > > >
> > > > > > > > At present, we have two similar logic to perform the XDP prog.
> > > > > > > >
> > > > > > > > Therefore, this PATCH separates the code of executing XDP, which is
> > > > > > > > conducive to later maintenance.
> > > > > > > >
> > > > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > > > ---
> > > > > > > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > > > > > > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > > > index bb426958cdd4..72b9d6ee4024 100644
> > > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > > > > > > >         char padding[12];
> > > > > > > >  };
> > > > > > > >
> > > > > > > > +enum {
> > > > > > > > +       /* xdp pass */
> > > > > > > > +       VIRTNET_XDP_RES_PASS,
> > > > > > > > +       /* drop packet. the caller needs to release the page. */
> > > > > > > > +       VIRTNET_XDP_RES_DROP,
> > > > > > > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > > > > > > +       VIRTNET_XDP_RES_CONSUMED,
> > > > > > > > +};
> > > > > > >
> > > > > > > I'd prefer this to be done on top unless it is a must. But I don't see
> > > > > > > any advantage of introducing this, it's partial mapping of XDP action
> > > > > > > and it needs to be extended when XDP action is extended. (And we've
> > > > > > > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> > > > > >
> > > > > > No, these are the three states of buffer after XDP processing.
> > > > > >
> > > > > > * PASS: goto make skb
> > > > >
> > > > > XDP_PASS goes for this.
> > > > >
> > > > > > * DROP: we should release buffer
> > > > >
> > > > > XDP_DROP and error conditions go with this.
> > > > >
> > > > > > > * CONSUMED: xdp prog used the buffer, we do nothing
> > > > >
> > > > > XDP_TX/XDP_REDIRECTION goes for this.
> > > > >
> > > > > So t virtnet_xdp_handler() just maps XDP ACTION plus the error
> > > > > conditions to the above three states.
> > > > >
> > > > > We can simply map error to XDP_DROP like:
> > > > >
> > > > >        case XDP_TX:
> > > > >               stats->xdp_tx++;
> > > > >                xdpf = xdp_convert_buff_to_frame(xdp);
> > > > >                if (unlikely(!xdpf))
> > > > >                        return XDP_DROP;
> > > > >
> > > > > A good side effect is to avoid the xdp_xmit pointer to be passed to
> > > > > the function.
> > > >
> > > >
> > > > So, I guess you mean this:
> > > >
> > > >         switch (act) {
> > > >         case XDP_PASS:
> > > >                 /* handle pass */
> > > >                 return skb;
> > > >
> > > >         case XDP_TX:
> > > >                 *xdp_xmit |= VIRTIO_XDP_TX;
> > > >                 goto xmit;
> > > >
> > > >         case XDP_REDIRECT:
> > > >                 *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > >                 goto xmit;
> > > >
> > > >         case XDP_DROP:
> > > >         default:
> > > >                 goto err_xdp;
> > > >         }
> > > >
> > > > I have to say there is no problem from the perspective of code implementation.
> > >
> > > Note that this is the current logic where it is determined in
> > > receive_small() and receive_mergeable().
> >
> > Yes, but the purpose of these patches is to simplify the call.
>
> You mean simplify the receive_small()/mergeable()?

YES.


>
> >
> > >
> > > >
> > > > But if a new ACTION like XDP_TX or XDP_REDIRECT is added in the future, then
> > > > we must modify all the callers.
> > >
> > > This is fine since we only use a single type for XDP action.
> >
> > a single type?
>
> Instead of (partial) duplicating XDP actions in the new enums.


I think there is really a misunderstanding here. So is this what you have in mind?

   VIRTNET_XDP_RES_PASS,
   VIRTNET_XDP_RES_TX_REDIRECT,
   VIRTNET_XDP_RES_DROP,



>
> >
> > >
> > > > This is the benefit of using CONSUMED.
> > >
> > > It's very hard to say, e.g if we want to support cloning in the future.
> >
> > cloning? You mean clone one new buffer.
> >
> > It is true that no matter what realization, the logic must be modified.
>
> Yes.
>
> >
> > >
> > > >
> > > > I think it is a good advantage to put xdp_xmit in virtnet_xdp_handler(),
> > > > which makes the caller not care too much about these details.
> > >
> > > This part I don't understand, having xdp_xmit means the caller need to
> > > know whether it is xmited or redirected. The point of the enum is to
> > > hide the XDP actions, but it's conflict with what xdp_xmit who want to
> > > expose (part of) the XDP actions.
> >
> > I mean, no matter what virtnet_xdp_handler () returns? XDP_ACTION or some one I
> > defined, I want to hide the modification of xdp_xmit to virtnet_xdp_handler().
> >
> > Even if virtnet_xdp_handler() returns XDP_TX, we can also complete the
> > modification of XDP_XMIT within Virtnet_xdp_handler().
> >
> >
> > >
> > > > If you take into
> > > > account the problem of increasing the number of parameters, I advise to put it
> > > > in rq.
> > >
> > > I don't have strong opinion to introduce the enum,
> >
> > OK, I will drop these new enums.
>
> Just to make sure we are at the same page. I mean, if there is no
> objection from others, I'm ok to have an enum, but we need to use a
> separate patch to do that.

Do you mean introducing the enums alone, without virtnet_xdp_handler()?

>
> >
> > > what I want to say
> > > is, use a separated patch to do that.
> >
> > Does this part refer to putting xdp_xmit in rq?
>
> I mean it's better to be done separately. But I don't see the
> advantage of this other than reducing the parameters.

I think so also.

Thanks.


>
> Thanks
>
> >
> > Thanks.
> >
> >
> > >
> > > Thanks
> > >
> > > >
> > > > Thanks.
> > > >
> > > >
> > > >
> > > > >
> > > > > >
> > > > > > The latter two are not particularly related to XDP ACTION. And it does not need
> > > > > > to extend when XDP action is extended. At least I have not thought of this
> > > > > > situation.
> > > > >
> > > > > What's the advantages of such indirection compared to using XDP action directly?
> > > > >
> > > > > Thanks
> > > > >
> > > > > >
> > > > > >
> > > > > > >
> > > > > > > > +
> > > > > > > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > > >
> > > > > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > > > > >         return ret;
> > > > > > > >  }
> > > > > > > >
> > > > > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > > > > +                              struct net_device *dev,
> > > > > > > > +                              unsigned int *xdp_xmit,
> > > > > > > > +                              struct virtnet_rq_stats *stats)
> > > > > > > > +{
> > > > > > > > +       struct xdp_frame *xdpf;
> > > > > > > > +       int err;
> > > > > > > > +       u32 act;
> > > > > > > > +
> > > > > > > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > > > > > > +       stats->xdp_packets++;
> > > > > > > > +
> > > > > > > > +       switch (act) {
> > > > > > > > +       case XDP_PASS:
> > > > > > > > +               return VIRTNET_XDP_RES_PASS;
> > > > > > > > +
> > > > > > > > +       case XDP_TX:
> > > > > > > > +               stats->xdp_tx++;
> > > > > > > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > > > > +               if (unlikely(!xdpf))
> > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > +
> > > > > > > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > +               if (unlikely(!err)) {
> > > > > > > > +                       xdp_return_frame_rx_napi(xdpf);
> > > > > > > > +               } else if (unlikely(err < 0)) {
> > > > > > > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > +               }
> > > > > > > > +
> > > > > > > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > > +
> > > > > > > > +       case XDP_REDIRECT:
> > > > > > > > +               stats->xdp_redirects++;
> > > > > > > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > > > > > > +               if (err)
> > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > +
> > > > > > > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > > +
> > > > > > > > +       default:
> > > > > > > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > > > > > > +               fallthrough;
> > > > > > > > +       case XDP_ABORTED:
> > > > > > > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > > +               fallthrough;
> > > > > > > > +       case XDP_DROP:
> > > > > > > > +               return VIRTNET_XDP_RES_DROP;
> > > > > > > > +       }
> > > > > > > > +}
> > > > > > > > +
> > > > > > > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > > > > > > >  {
> > > > > > > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > > > > > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > >         struct page *page = virt_to_head_page(buf);
> > > > > > > >         unsigned int delta = 0;
> > > > > > > >         struct page *xdp_page;
> > > > > > > > -       int err;
> > > > > > > >         unsigned int metasize = 0;
> > > > > > > >
> > > > > > > >         len -= vi->hdr_len;
> > > > > > > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > > > > > > >         if (xdp_prog) {
> > > > > > > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > > > > > > -               struct xdp_frame *xdpf;
> > > > > > > >                 struct xdp_buff xdp;
> > > > > > > >                 void *orig_data;
> > > > > > > >                 u32 act;
> > > > > > > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > > > > > >                                  xdp_headroom, len, true);
> > > > > > > >                 orig_data = xdp.data;
> > > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > > -               stats->xdp_packets++;
> > > > > > > > +
> > > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > > >
> > > > > > > >                 switch (act) {
> > > > > > > > -               case XDP_PASS:
> > > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > > >                         /* Recalculate length in case bpf program changed it */
> > > > > > > >                         delta = orig_data - xdp.data;
> > > > > > > >                         len = xdp.data_end - xdp.data;
> > > > > > > >                         metasize = xdp.data - xdp.data_meta;
> > > > > > > >                         break;
> > > > > > > > -               case XDP_TX:
> > > > > > > > -                       stats->xdp_tx++;
> > > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > > -                       if (unlikely(!xdpf))
> > > > > > > > -                               goto err_xdp;
> > > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > -                       if (unlikely(!err)) {
> > > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > -                               goto err_xdp;
> > > > > > > > -                       }
> > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > -                       rcu_read_unlock();
> > > > > > > > -                       goto xdp_xmit;
> > > > > > > > -               case XDP_REDIRECT:
> > > > > > > > -                       stats->xdp_redirects++;
> > > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > > -                       if (err)
> > > > > > > > -                               goto err_xdp;
> > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > +
> > > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > > >                         rcu_read_unlock();
> > > > > > > >                         goto xdp_xmit;
> > > > > > > > -               default:
> > > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > > -                       fallthrough;
> > > > > > > > -               case XDP_ABORTED:
> > > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > -                       goto err_xdp;
> > > > > > > > -               case XDP_DROP:
> > > > > > > > +
> > > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > > >                         goto err_xdp;
> > > > > > > >                 }
> > > > > > > >         }
> > > > > > > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > > >         if (xdp_prog) {
> > > > > > > >                 unsigned int xdp_frags_truesz = 0;
> > > > > > > >                 struct skb_shared_info *shinfo;
> > > > > > > > -               struct xdp_frame *xdpf;
> > > > > > > >                 struct page *xdp_page;
> > > > > > > >                 struct xdp_buff xdp;
> > > > > > > >                 void *data;
> > > > > > > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > > >                 if (unlikely(err))
> > > > > > > >                         goto err_xdp_frags;
> > > > > > > >
> > > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > > -               stats->xdp_packets++;
> > > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > > >
> > > > > > > >                 switch (act) {
> > > > > > > > -               case XDP_PASS:
> > > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > > > > > > >                         if (unlikely(!head_skb))
> > > > > > > >                                 goto err_xdp_frags;
> > > > > > > >
> > > > > > > >                         rcu_read_unlock();
> > > > > > > >                         return head_skb;
> > > > > > > > -               case XDP_TX:
> > > > > > > > -                       stats->xdp_tx++;
> > > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > > -                       if (unlikely(!xdpf)) {
> > > > > > > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> > > > > > >
> > > > > > > Nit: This debug is lost after the conversion.
> > > > > >
> > > > > > Will fix.
> > > > > >
> > > > > > Thanks.
> > > > > >
> > > > > > >
> > > > > > > Thanks
> > > > > > >
> > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > -                       }
> > > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > -                       if (unlikely(!err)) {
> > > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > -                       }
> > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > -                       rcu_read_unlock();
> > > > > > > > -                       goto xdp_xmit;
> > > > > > > > -               case XDP_REDIRECT:
> > > > > > > > -                       stats->xdp_redirects++;
> > > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > > -                       if (err)
> > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > +
> > > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > > >                         rcu_read_unlock();
> > > > > > > >                         goto xdp_xmit;
> > > > > > > > -               default:
> > > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > > -                       fallthrough;
> > > > > > > > -               case XDP_ABORTED:
> > > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > -                       fallthrough;
> > > > > > > > -               case XDP_DROP:
> > > > > > > > +
> > > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > > >                         goto err_xdp_frags;
> > > > > > > >                 }
> > > > > > > >  err_xdp_frags:
> > > > > > > > --
> > > > > > > > 2.32.0.3.g01195cf9f
> > > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
>
Jason Wang April 4, 2023, 8:03 a.m. UTC | #9
On Tue, Apr 4, 2023 at 3:12 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Tue, 4 Apr 2023 15:01:36 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Tue, Apr 4, 2023 at 2:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > On Tue, 4 Apr 2023 14:35:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > On Tue, Apr 4, 2023 at 2:22 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > >
> > > > > On Tue, 4 Apr 2023 13:04:02 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > > On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > > >
> > > > > > > On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > > > > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > > > > >
> > > > > > > > > At present, we have two similar logic to perform the XDP prog.
> > > > > > > > >
> > > > > > > > > Therefore, this PATCH separates the code of executing XDP, which is
> > > > > > > > > conducive to later maintenance.
> > > > > > > > >
> > > > > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > > > > ---
> > > > > > > > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > > > > > > > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > > > > index bb426958cdd4..72b9d6ee4024 100644
> > > > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > > > > > > > >         char padding[12];
> > > > > > > > >  };
> > > > > > > > >
> > > > > > > > > +enum {
> > > > > > > > > +       /* xdp pass */
> > > > > > > > > +       VIRTNET_XDP_RES_PASS,
> > > > > > > > > +       /* drop packet. the caller needs to release the page. */
> > > > > > > > > +       VIRTNET_XDP_RES_DROP,
> > > > > > > > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > > > > > > > +       VIRTNET_XDP_RES_CONSUMED,
> > > > > > > > > +};
> > > > > > > >
> > > > > > > > I'd prefer this to be done on top unless it is a must. But I don't see
> > > > > > > > any advantage of introducing this, it's partial mapping of XDP action
> > > > > > > > and it needs to be extended when XDP action is extended. (And we've
> > > > > > > > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> > > > > > >
> > > > > > > No, these are the three states of buffer after XDP processing.
> > > > > > >
> > > > > > > * PASS: goto make skb
> > > > > >
> > > > > > XDP_PASS goes for this.
> > > > > >
> > > > > > > * DROP: we should release buffer
> > > > > >
> > > > > > XDP_DROP and error conditions go with this.
> > > > > >
> > > > > > > > * CONSUMED: xdp prog used the buffer, we do nothing
> > > > > >
> > > > > > XDP_TX/XDP_REDIRECTION goes for this.
> > > > > >
> > > > > > So t virtnet_xdp_handler() just maps XDP ACTION plus the error
> > > > > > conditions to the above three states.
> > > > > >
> > > > > > We can simply map error to XDP_DROP like:
> > > > > >
> > > > > >        case XDP_TX:
> > > > > >               stats->xdp_tx++;
> > > > > >                xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > >                if (unlikely(!xdpf))
> > > > > >                        return XDP_DROP;
> > > > > >
> > > > > > A good side effect is to avoid the xdp_xmit pointer to be passed to
> > > > > > the function.
> > > > >
> > > > >
> > > > > So, I guess you mean this:
> > > > >
> > > > >         switch (act) {
> > > > >         case XDP_PASS:
> > > > >                 /* handle pass */
> > > > >                 return skb;
> > > > >
> > > > >         case XDP_TX:
> > > > >                 *xdp_xmit |= VIRTIO_XDP_TX;
> > > > >                 goto xmit;
> > > > >
> > > > >         case XDP_REDIRECT:
> > > > >                 *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > >                 goto xmit;
> > > > >
> > > > >         case XDP_DROP:
> > > > >         default:
> > > > >                 goto err_xdp;
> > > > >         }
> > > > >
> > > > > I have to say there is no problem from the perspective of code implementation.
> > > >
> > > > Note that this is the current logic where it is determined in
> > > > receive_small() and receive_mergeable().
> > >
> > > Yes, but the purpose of these patches is to simplify the call.
> >
> > You mean simplify the receive_small()/mergeable()?
>
> YES.
>
>
> >
> > >
> > > >
> > > > >
> > > > > But if a new ACTION like XDP_TX or XDP_REDIRECT is added in the future, then
> > > > > we must modify all the callers.
> > > >
> > > > This is fine since we only use a single type for XDP action.
> > >
> > > a single type?
> >
> > Instead of (partial) duplicating XDP actions in the new enums.
>
>
> I think there is really a misunderstanding here. So is this what you have in mind?
>
>    VIRTNET_XDP_RES_PASS,
>    VIRTNET_XDP_RES_TX_REDIRECT,
>    VIRTNET_XDP_RES_DROP,

No, I meant the enum you introduced.

>
>
>
> >
> > >
> > > >
> > > > > This is the benefit of using CONSUMED.
> > > >
> > > > It's very hard to say, e.g if we want to support cloning in the future.
> > >
> > > cloning? You mean clone one new buffer.
> > >
> > > It is true that no matter what realization, the logic must be modified.
> >
> > Yes.
> >
> > >
> > > >
> > > > >
> > > > > I think it is a good advantage to put xdp_xmit in virtnet_xdp_handler(),
> > > > > which makes the caller not care too much about these details.
> > > >
> > > > This part I don't understand, having xdp_xmit means the caller need to
> > > > know whether it is xmited or redirected. The point of the enum is to
> > > > hide the XDP actions, but it's conflict with what xdp_xmit who want to
> > > > expose (part of) the XDP actions.
> > >
> > > I mean, no matter what virtnet_xdp_handler () returns? XDP_ACTION or some one I
> > > defined, I want to hide the modification of xdp_xmit to virtnet_xdp_handler().
> > >
> > > Even if virtnet_xdp_handler() returns XDP_TX, we can also complete the
> > > modification of XDP_XMIT within Virtnet_xdp_handler().
> > >
> > >
> > > >
> > > > > If you take into
> > > > > account the problem of increasing the number of parameters, I advise to put it
> > > > > in rq.
> > > >
> > > > I don't have strong opinion to introduce the enum,
> > >
> > > OK, I will drop these new enums.
> >
> > Just to make sure we are at the same page. I mean, if there is no
> > objection from others, I'm ok to have an enum, but we need to use a
> > separate patch to do that.
>
> Do you mean introducing the enums alone, without virtnet_xdp_handler()?

I meant, having two patches

1) split out virtnet_xdp_handler() without introducing any new enums
2) introduce the new enum to simplify the codes

Thanks

>
> >
> > >
> > > > what I want to say
> > > > is, use a separated patch to do that.
> > >
> > > Does this part refer to putting xdp_xmit in rq?
> >
> > I mean it's better to be done separately. But I don't see the
> > advantage of this other than reducing the parameters.
>
> I think so also.
>
> Thanks.
>
>
> >
> > Thanks
> >
> > >
> > > Thanks.
> > >
> > >
> > > >
> > > > Thanks
> > > >
> > > > >
> > > > > Thanks.
> > > > >
> > > > >
> > > > >
> > > > > >
> > > > > > >
> > > > > > > The latter two are not particularly related to XDP ACTION. And it does not need
> > > > > > > to extend when XDP action is extended. At least I have not thought of this
> > > > > > > situation.
> > > > > >
> > > > > > What's the advantages of such indirection compared to using XDP action directly?
> > > > > >
> > > > > > Thanks
> > > > > >
> > > > > > >
> > > > > > >
> > > > > > > >
> > > > > > > > > +
> > > > > > > > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > > > >
> > > > > > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > > > > > >         return ret;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > > > > > +                              struct net_device *dev,
> > > > > > > > > +                              unsigned int *xdp_xmit,
> > > > > > > > > +                              struct virtnet_rq_stats *stats)
> > > > > > > > > +{
> > > > > > > > > +       struct xdp_frame *xdpf;
> > > > > > > > > +       int err;
> > > > > > > > > +       u32 act;
> > > > > > > > > +
> > > > > > > > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > > > > > > > +       stats->xdp_packets++;
> > > > > > > > > +
> > > > > > > > > +       switch (act) {
> > > > > > > > > +       case XDP_PASS:
> > > > > > > > > +               return VIRTNET_XDP_RES_PASS;
> > > > > > > > > +
> > > > > > > > > +       case XDP_TX:
> > > > > > > > > +               stats->xdp_tx++;
> > > > > > > > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > > > > > +               if (unlikely(!xdpf))
> > > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > > +
> > > > > > > > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > > +               if (unlikely(!err)) {
> > > > > > > > > +                       xdp_return_frame_rx_napi(xdpf);
> > > > > > > > > +               } else if (unlikely(err < 0)) {
> > > > > > > > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > > +               }
> > > > > > > > > +
> > > > > > > > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > > > +
> > > > > > > > > +       case XDP_REDIRECT:
> > > > > > > > > +               stats->xdp_redirects++;
> > > > > > > > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > > > > > > > +               if (err)
> > > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > > +
> > > > > > > > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > > > +
> > > > > > > > > +       default:
> > > > > > > > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > > > > > > > +               fallthrough;
> > > > > > > > > +       case XDP_ABORTED:
> > > > > > > > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > > > +               fallthrough;
> > > > > > > > > +       case XDP_DROP:
> > > > > > > > > +               return VIRTNET_XDP_RES_DROP;
> > > > > > > > > +       }
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > > > > > > > >  {
> > > > > > > > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > > > > > > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > > >         struct page *page = virt_to_head_page(buf);
> > > > > > > > >         unsigned int delta = 0;
> > > > > > > > >         struct page *xdp_page;
> > > > > > > > > -       int err;
> > > > > > > > >         unsigned int metasize = 0;
> > > > > > > > >
> > > > > > > > >         len -= vi->hdr_len;
> > > > > > > > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > > > > > > > >         if (xdp_prog) {
> > > > > > > > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > > > > > > > -               struct xdp_frame *xdpf;
> > > > > > > > >                 struct xdp_buff xdp;
> > > > > > > > >                 void *orig_data;
> > > > > > > > >                 u32 act;
> > > > > > > > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > > > > > > >                                  xdp_headroom, len, true);
> > > > > > > > >                 orig_data = xdp.data;
> > > > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > > > -               stats->xdp_packets++;
> > > > > > > > > +
> > > > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > > > >
> > > > > > > > >                 switch (act) {
> > > > > > > > > -               case XDP_PASS:
> > > > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > > > >                         /* Recalculate length in case bpf program changed it */
> > > > > > > > >                         delta = orig_data - xdp.data;
> > > > > > > > >                         len = xdp.data_end - xdp.data;
> > > > > > > > >                         metasize = xdp.data - xdp.data_meta;
> > > > > > > > >                         break;
> > > > > > > > > -               case XDP_TX:
> > > > > > > > > -                       stats->xdp_tx++;
> > > > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > > > -                       if (unlikely(!xdpf))
> > > > > > > > > -                               goto err_xdp;
> > > > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > > -                       if (unlikely(!err)) {
> > > > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > -                               goto err_xdp;
> > > > > > > > > -                       }
> > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > > -                       rcu_read_unlock();
> > > > > > > > > -                       goto xdp_xmit;
> > > > > > > > > -               case XDP_REDIRECT:
> > > > > > > > > -                       stats->xdp_redirects++;
> > > > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > > > -                       if (err)
> > > > > > > > > -                               goto err_xdp;
> > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > > +
> > > > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > > > >                         rcu_read_unlock();
> > > > > > > > >                         goto xdp_xmit;
> > > > > > > > > -               default:
> > > > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > > > -                       fallthrough;
> > > > > > > > > -               case XDP_ABORTED:
> > > > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > -                       goto err_xdp;
> > > > > > > > > -               case XDP_DROP:
> > > > > > > > > +
> > > > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > > > >                         goto err_xdp;
> > > > > > > > >                 }
> > > > > > > > >         }
> > > > > > > > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > > > >         if (xdp_prog) {
> > > > > > > > >                 unsigned int xdp_frags_truesz = 0;
> > > > > > > > >                 struct skb_shared_info *shinfo;
> > > > > > > > > -               struct xdp_frame *xdpf;
> > > > > > > > >                 struct page *xdp_page;
> > > > > > > > >                 struct xdp_buff xdp;
> > > > > > > > >                 void *data;
> > > > > > > > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > > > >                 if (unlikely(err))
> > > > > > > > >                         goto err_xdp_frags;
> > > > > > > > >
> > > > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > > > -               stats->xdp_packets++;
> > > > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > > > >
> > > > > > > > >                 switch (act) {
> > > > > > > > > -               case XDP_PASS:
> > > > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > > > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > > > > > > > >                         if (unlikely(!head_skb))
> > > > > > > > >                                 goto err_xdp_frags;
> > > > > > > > >
> > > > > > > > >                         rcu_read_unlock();
> > > > > > > > >                         return head_skb;
> > > > > > > > > -               case XDP_TX:
> > > > > > > > > -                       stats->xdp_tx++;
> > > > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > > > -                       if (unlikely(!xdpf)) {
> > > > > > > > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> > > > > > > >
> > > > > > > > Nit: This debug is lost after the conversion.
> > > > > > >
> > > > > > > Will fix.
> > > > > > >
> > > > > > > Thanks.
> > > > > > >
> > > > > > > >
> > > > > > > > Thanks
> > > > > > > >
> > > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > > -                       }
> > > > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > > -                       if (unlikely(!err)) {
> > > > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > > -                       }
> > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > > -                       rcu_read_unlock();
> > > > > > > > > -                       goto xdp_xmit;
> > > > > > > > > -               case XDP_REDIRECT:
> > > > > > > > > -                       stats->xdp_redirects++;
> > > > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > > > -                       if (err)
> > > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > > +
> > > > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > > > >                         rcu_read_unlock();
> > > > > > > > >                         goto xdp_xmit;
> > > > > > > > > -               default:
> > > > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > > > -                       fallthrough;
> > > > > > > > > -               case XDP_ABORTED:
> > > > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > -                       fallthrough;
> > > > > > > > > -               case XDP_DROP:
> > > > > > > > > +
> > > > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > > > >                         goto err_xdp_frags;
> > > > > > > > >                 }
> > > > > > > > >  err_xdp_frags:
> > > > > > > > > --
> > > > > > > > > 2.32.0.3.g01195cf9f
> > > > > > > > >
> > > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
>
Xuan Zhuo April 4, 2023, 8:09 a.m. UTC | #10
On Tue, 4 Apr 2023 16:03:49 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Apr 4, 2023 at 3:12 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Tue, 4 Apr 2023 15:01:36 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Tue, Apr 4, 2023 at 2:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > On Tue, 4 Apr 2023 14:35:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > On Tue, Apr 4, 2023 at 2:22 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > >
> > > > > > On Tue, 4 Apr 2023 13:04:02 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > > > On Mon, Apr 3, 2023 at 12:17 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > > > >
> > > > > > > > On Mon, 3 Apr 2023 10:43:03 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > > > > > On Tue, Mar 28, 2023 at 8:04 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > > > > > >
> > > > > > > > > > At present, we have two similar pieces of logic to perform the XDP prog.
> > > > > > > > > >
> > > > > > > > > > Therefore, this PATCH separates the code of executing XDP, which is
> > > > > > > > > > conducive to later maintenance.
> > > > > > > > > >
> > > > > > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > > > > > ---
> > > > > > > > > >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++------------------
> > > > > > > > > >  1 file changed, 75 insertions(+), 67 deletions(-)
> > > > > > > > > >
> > > > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > > > > > index bb426958cdd4..72b9d6ee4024 100644
> > > > > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > > > > @@ -301,6 +301,15 @@ struct padded_vnet_hdr {
> > > > > > > > > >         char padding[12];
> > > > > > > > > >  };
> > > > > > > > > >
> > > > > > > > > > +enum {
> > > > > > > > > > +       /* xdp pass */
> > > > > > > > > > +       VIRTNET_XDP_RES_PASS,
> > > > > > > > > > +       /* drop packet. the caller needs to release the page. */
> > > > > > > > > > +       VIRTNET_XDP_RES_DROP,
> > > > > > > > > > +       /* packet is consumed by xdp. the caller needs to do nothing. */
> > > > > > > > > > +       VIRTNET_XDP_RES_CONSUMED,
> > > > > > > > > > +};
> > > > > > > > >
> > > > > > > > > I'd prefer this to be done on top unless it is a must. But I don't see
> > > > > > > > > any advantage of introducing this, it's partial mapping of XDP action
> > > > > > > > > and it needs to be extended when XDP action is extended. (And we've
> > > > > > > > > already had: VIRTIO_XDP_REDIR and VIRTIO_XDP_TX ...)
> > > > > > > >
> > > > > > > > No, these are the three states of buffer after XDP processing.
> > > > > > > >
> > > > > > > > * PASS: goto make skb
> > > > > > >
> > > > > > > XDP_PASS goes for this.
> > > > > > >
> > > > > > > > * DROP: we should release buffer
> > > > > > >
> > > > > > > XDP_DROP and error conditions go with this.
> > > > > > >
> > > > > > > > * CONSUMED: xdp prog used the buffer, we do nothing
> > > > > > >
> > > > > > > XDP_TX/XDP_REDIRECTION goes for this.
> > > > > > >
> > > > > > > So t virtnet_xdp_handler() just maps XDP ACTION plus the error
> > > > > > > conditions to the above three states.
> > > > > > >
> > > > > > > We can simply map error to XDP_DROP like:
> > > > > > >
> > > > > > >        case XDP_TX:
> > > > > > >               stats->xdp_tx++;
> > > > > > >                xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > > >                if (unlikely(!xdpf))
> > > > > > >                        return XDP_DROP;
> > > > > > >
> > > > > > > A good side effect is to avoid the xdp_xmit pointer to be passed to
> > > > > > > the function.
> > > > > >
> > > > > >
> > > > > > So, I guess you mean this:
> > > > > >
> > > > > >         switch (act) {
> > > > > >         case XDP_PASS:
> > > > > >                 /* handle pass */
> > > > > >                 return skb;
> > > > > >
> > > > > >         case XDP_TX:
> > > > > >                 *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > >                 goto xmit;
> > > > > >
> > > > > >         case XDP_REDIRECT:
> > > > > >                 *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > >                 goto xmit;
> > > > > >
> > > > > >         case XDP_DROP:
> > > > > >         default:
> > > > > >                 goto err_xdp;
> > > > > >         }
> > > > > >
> > > > > > I have to say there is no problem from the perspective of code implementation.
> > > > >
> > > > > Note that this is the current logic where it is determined in
> > > > > receive_small() and receive_mergeable().
> > > >
> > > > Yes, but the purpose of this patches is to simplify the call.
> > >
> > > You mean simplify the receive_small()/mergeable()?
> >
> > YES.
> >
> >
> > >
> > > >
> > > > >
> > > > > >
> > > > > > But if the a new ACTION liking XDP_TX,XDP_REDIRECT is added in the future, then
> > > > > > we must modify all the callers.
> > > > >
> > > > > This is fine since we only use a single type for XDP action.
> > > >
> > > > a single type?
> > >
> > > Instead of (partial) duplicating XDP actions in the new enums.
> >
> >
> > I think it's really misunderstand here. So your thought is these?
> >
> >    VIRTNET_XDP_RES_PASS,
> >    VIRTNET_XDP_RES_TX_REDIRECT,
> >    VIRTNET_XDP_RES_DROP,
>
> No, I meant the enum you introduced.
>
> >
> >
> >
> > >
> > > >
> > > > >
> > > > > > This is the benefit of using CONSUMED.
> > > > >
> > > > > It's very hard to say, e.g if we want to support cloning in the future.
> > > >
> > > > cloning? You mean clone one new buffer.
> > > >
> > > > It is true that no matter what realization, the logic must be modified.
> > >
> > > Yes.
> > >
> > > >
> > > > >
> > > > > >
> > > > > > I think it is a good advantage to put xdp_xmit in virtnet_xdp_handler(),
> > > > > > which makes the caller not care too much about these details.
> > > > >
> > > > > This part I don't understand, having xdp_xmit means the caller need to
> > > > > know whether it is xmited or redirected. The point of the enum is to
> > > > > hide the XDP actions, but it's conflict with what xdp_xmit who want to
> > > > > expose (part of) the XDP actions.
> > > >
> > > > I mean, no matter what virtnet_xdp_handler () returns? XDP_ACTION or some one I
> > > > defined, I want to hide the modification of xdp_xmit to virtnet_xdp_handler().
> > > >
> > > > Even if virtnet_xdp_handler() returns XDP_TX, we can also complete the
> > > > modification of XDP_XMIT within Virtnet_xdp_handler().
> > > >
> > > >
> > > > >
> > > > > > If you take into
> > > > > > account the problem of increasing the number of parameters, I advise to put it
> > > > > > in rq.
> > > > >
> > > > > I don't have strong opinion to introduce the enum,
> > > >
> > > > OK, I will drop these new enums.
> > >
> > > Just to make sure we are at the same page. I mean, if there is no
> > > objection from others, I'm ok to have an enum, but we need to use a
> > > separate patch to do that.
> >
> > Do you refer to introduce enums alone without virtnet_xdp_handler()?
>
> I meant, having two patches
>
> 1) split out virtnet_xdp_handler() without introducing any new enums
> 2) introduce the new enum to simplify the codes

OK. I see.

Thanks.


>
> Thanks
>
> >
> > >
> > > >
> > > > > what I want to say
> > > > > is, use a separated patch to do that.
> > > >
> > > > Does this part refer to putting xdp_xmit in rq?
> > >
> > > I mean it's better to be done separately. But I don't see the
> > > advantage of this other than reducing the parameters.
> >
> > I think so also.
> >
> > Thanks.
> >
> >
> > >
> > > Thanks
> > >
> > > >
> > > > Thanks.
> > > >
> > > >
> > > > >
> > > > > Thanks
> > > > >
> > > > > >
> > > > > > Thanks.
> > > > > >
> > > > > >
> > > > > >
> > > > > > >
> > > > > > > >
> > > > > > > > The latter two are not particularly related to XDP ACTION. And it does not need
> > > > > > > > to extend when XDP action is extended. At least I have not thought of this
> > > > > > > > situation.
> > > > > > >
> > > > > > > What's the advantages of such indirection compared to using XDP action directly?
> > > > > > >
> > > > > > > Thanks
> > > > > > >
> > > > > > > >
> > > > > > > >
> > > > > > > > >
> > > > > > > > > > +
> > > > > > > > > >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > > > > >
> > > > > > > > > > @@ -789,6 +798,59 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> > > > > > > > > >         return ret;
> > > > > > > > > >  }
> > > > > > > > > >
> > > > > > > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > > > > > > +                              struct net_device *dev,
> > > > > > > > > > +                              unsigned int *xdp_xmit,
> > > > > > > > > > +                              struct virtnet_rq_stats *stats)
> > > > > > > > > > +{
> > > > > > > > > > +       struct xdp_frame *xdpf;
> > > > > > > > > > +       int err;
> > > > > > > > > > +       u32 act;
> > > > > > > > > > +
> > > > > > > > > > +       act = bpf_prog_run_xdp(xdp_prog, xdp);
> > > > > > > > > > +       stats->xdp_packets++;
> > > > > > > > > > +
> > > > > > > > > > +       switch (act) {
> > > > > > > > > > +       case XDP_PASS:
> > > > > > > > > > +               return VIRTNET_XDP_RES_PASS;
> > > > > > > > > > +
> > > > > > > > > > +       case XDP_TX:
> > > > > > > > > > +               stats->xdp_tx++;
> > > > > > > > > > +               xdpf = xdp_convert_buff_to_frame(xdp);
> > > > > > > > > > +               if (unlikely(!xdpf))
> > > > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > > > +
> > > > > > > > > > +               err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > > > +               if (unlikely(!err)) {
> > > > > > > > > > +                       xdp_return_frame_rx_napi(xdpf);
> > > > > > > > > > +               } else if (unlikely(err < 0)) {
> > > > > > > > > > +                       trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > > > +               }
> > > > > > > > > > +
> > > > > > > > > > +               *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > > > > +
> > > > > > > > > > +       case XDP_REDIRECT:
> > > > > > > > > > +               stats->xdp_redirects++;
> > > > > > > > > > +               err = xdp_do_redirect(dev, xdp, xdp_prog);
> > > > > > > > > > +               if (err)
> > > > > > > > > > +                       return VIRTNET_XDP_RES_DROP;
> > > > > > > > > > +
> > > > > > > > > > +               *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > > > +               return VIRTNET_XDP_RES_CONSUMED;
> > > > > > > > > > +
> > > > > > > > > > +       default:
> > > > > > > > > > +               bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
> > > > > > > > > > +               fallthrough;
> > > > > > > > > > +       case XDP_ABORTED:
> > > > > > > > > > +               trace_xdp_exception(dev, xdp_prog, act);
> > > > > > > > > > +               fallthrough;
> > > > > > > > > > +       case XDP_DROP:
> > > > > > > > > > +               return VIRTNET_XDP_RES_DROP;
> > > > > > > > > > +       }
> > > > > > > > > > +}
> > > > > > > > > > +
> > > > > > > > > >  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
> > > > > > > > > >  {
> > > > > > > > > >         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
> > > > > > > > > > @@ -876,7 +938,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > > > >         struct page *page = virt_to_head_page(buf);
> > > > > > > > > >         unsigned int delta = 0;
> > > > > > > > > >         struct page *xdp_page;
> > > > > > > > > > -       int err;
> > > > > > > > > >         unsigned int metasize = 0;
> > > > > > > > > >
> > > > > > > > > >         len -= vi->hdr_len;
> > > > > > > > > > @@ -898,7 +959,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > > > >         xdp_prog = rcu_dereference(rq->xdp_prog);
> > > > > > > > > >         if (xdp_prog) {
> > > > > > > > > >                 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
> > > > > > > > > > -               struct xdp_frame *xdpf;
> > > > > > > > > >                 struct xdp_buff xdp;
> > > > > > > > > >                 void *orig_data;
> > > > > > > > > >                 u32 act;
> > > > > > > > > > @@ -931,46 +991,22 @@ static struct sk_buff *receive_small(struct net_device *dev,
> > > > > > > > > >                 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
> > > > > > > > > >                                  xdp_headroom, len, true);
> > > > > > > > > >                 orig_data = xdp.data;
> > > > > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > > > > -               stats->xdp_packets++;
> > > > > > > > > > +
> > > > > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > > > > >
> > > > > > > > > >                 switch (act) {
> > > > > > > > > > -               case XDP_PASS:
> > > > > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > > > > >                         /* Recalculate length in case bpf program changed it */
> > > > > > > > > >                         delta = orig_data - xdp.data;
> > > > > > > > > >                         len = xdp.data_end - xdp.data;
> > > > > > > > > >                         metasize = xdp.data - xdp.data_meta;
> > > > > > > > > >                         break;
> > > > > > > > > > -               case XDP_TX:
> > > > > > > > > > -                       stats->xdp_tx++;
> > > > > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > > > > -                       if (unlikely(!xdpf))
> > > > > > > > > > -                               goto err_xdp;
> > > > > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > > > -                       if (unlikely(!err)) {
> > > > > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > > -                               goto err_xdp;
> > > > > > > > > > -                       }
> > > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > > > -                       rcu_read_unlock();
> > > > > > > > > > -                       goto xdp_xmit;
> > > > > > > > > > -               case XDP_REDIRECT:
> > > > > > > > > > -                       stats->xdp_redirects++;
> > > > > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > > > > -                       if (err)
> > > > > > > > > > -                               goto err_xdp;
> > > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > > > +
> > > > > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > > > > >                         rcu_read_unlock();
> > > > > > > > > >                         goto xdp_xmit;
> > > > > > > > > > -               default:
> > > > > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > > > > -                       fallthrough;
> > > > > > > > > > -               case XDP_ABORTED:
> > > > > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > > -                       goto err_xdp;
> > > > > > > > > > -               case XDP_DROP:
> > > > > > > > > > +
> > > > > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > > > > >                         goto err_xdp;
> > > > > > > > > >                 }
> > > > > > > > > >         }
> > > > > > > > > > @@ -1277,7 +1313,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > > > > >         if (xdp_prog) {
> > > > > > > > > >                 unsigned int xdp_frags_truesz = 0;
> > > > > > > > > >                 struct skb_shared_info *shinfo;
> > > > > > > > > > -               struct xdp_frame *xdpf;
> > > > > > > > > >                 struct page *xdp_page;
> > > > > > > > > >                 struct xdp_buff xdp;
> > > > > > > > > >                 void *data;
> > > > > > > > > > @@ -1294,49 +1329,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> > > > > > > > > >                 if (unlikely(err))
> > > > > > > > > >                         goto err_xdp_frags;
> > > > > > > > > >
> > > > > > > > > > -               act = bpf_prog_run_xdp(xdp_prog, &xdp);
> > > > > > > > > > -               stats->xdp_packets++;
> > > > > > > > > > +               act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
> > > > > > > > > >
> > > > > > > > > >                 switch (act) {
> > > > > > > > > > -               case XDP_PASS:
> > > > > > > > > > +               case VIRTNET_XDP_RES_PASS:
> > > > > > > > > >                         head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
> > > > > > > > > >                         if (unlikely(!head_skb))
> > > > > > > > > >                                 goto err_xdp_frags;
> > > > > > > > > >
> > > > > > > > > >                         rcu_read_unlock();
> > > > > > > > > >                         return head_skb;
> > > > > > > > > > -               case XDP_TX:
> > > > > > > > > > -                       stats->xdp_tx++;
> > > > > > > > > > -                       xdpf = xdp_convert_buff_to_frame(&xdp);
> > > > > > > > > > -                       if (unlikely(!xdpf)) {
> > > > > > > > > > -                               netdev_dbg(dev, "convert buff to frame failed for xdp\n");
> > > > > > > > >
> > > > > > > > > Nit: This debug is lost after the conversion.
> > > > > > > >
> > > > > > > > Will fix.
> > > > > > > >
> > > > > > > > Thanks.
> > > > > > > >
> > > > > > > > >
> > > > > > > > > Thanks
> > > > > > > > >
> > > > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > > > -                       }
> > > > > > > > > > -                       err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> > > > > > > > > > -                       if (unlikely(!err)) {
> > > > > > > > > > -                               xdp_return_frame_rx_napi(xdpf);
> > > > > > > > > > -                       } else if (unlikely(err < 0)) {
> > > > > > > > > > -                               trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > > > -                       }
> > > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_TX;
> > > > > > > > > > -                       rcu_read_unlock();
> > > > > > > > > > -                       goto xdp_xmit;
> > > > > > > > > > -               case XDP_REDIRECT:
> > > > > > > > > > -                       stats->xdp_redirects++;
> > > > > > > > > > -                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
> > > > > > > > > > -                       if (err)
> > > > > > > > > > -                               goto err_xdp_frags;
> > > > > > > > > > -                       *xdp_xmit |= VIRTIO_XDP_REDIR;
> > > > > > > > > > +
> > > > > > > > > > +               case VIRTNET_XDP_RES_CONSUMED:
> > > > > > > > > >                         rcu_read_unlock();
> > > > > > > > > >                         goto xdp_xmit;
> > > > > > > > > > -               default:
> > > > > > > > > > -                       bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
> > > > > > > > > > -                       fallthrough;
> > > > > > > > > > -               case XDP_ABORTED:
> > > > > > > > > > -                       trace_xdp_exception(vi->dev, xdp_prog, act);
> > > > > > > > > > -                       fallthrough;
> > > > > > > > > > -               case XDP_DROP:
> > > > > > > > > > +
> > > > > > > > > > +               case VIRTNET_XDP_RES_DROP:
> > > > > > > > > >                         goto err_xdp_frags;
> > > > > > > > > >                 }
> > > > > > > > > >  err_xdp_frags:
> > > > > > > > > > --
> > > > > > > > > > 2.32.0.3.g01195cf9f
> > > > > > > > > >
> > > > > > > > >
> > > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
>
diff mbox series

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb426958cdd4..72b9d6ee4024 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -301,6 +301,15 @@  struct padded_vnet_hdr {
 	char padding[12];
 };
 
+enum {
+	/* xdp pass */
+	VIRTNET_XDP_RES_PASS,
+	/* drop packet. the caller needs to release the page. */
+	VIRTNET_XDP_RES_DROP,
+	/* packet is consumed by xdp. the caller needs to do nothing. */
+	VIRTNET_XDP_RES_CONSUMED,
+};
+
 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 
@@ -789,6 +798,59 @@  static int virtnet_xdp_xmit(struct net_device *dev,
 	return ret;
 }
 
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+			       struct net_device *dev,
+			       unsigned int *xdp_xmit,
+			       struct virtnet_rq_stats *stats)
+{
+	struct xdp_frame *xdpf;
+	int err;
+	u32 act;
+
+	act = bpf_prog_run_xdp(xdp_prog, xdp);
+	stats->xdp_packets++;
+
+	switch (act) {
+	case XDP_PASS:
+		return VIRTNET_XDP_RES_PASS;
+
+	case XDP_TX:
+		stats->xdp_tx++;
+		xdpf = xdp_convert_buff_to_frame(xdp);
+		if (unlikely(!xdpf))
+			return VIRTNET_XDP_RES_DROP;
+
+		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+		if (unlikely(!err)) {
+			xdp_return_frame_rx_napi(xdpf);
+		} else if (unlikely(err < 0)) {
+			trace_xdp_exception(dev, xdp_prog, act);
+			return VIRTNET_XDP_RES_DROP;
+		}
+
+		*xdp_xmit |= VIRTIO_XDP_TX;
+		return VIRTNET_XDP_RES_CONSUMED;
+
+	case XDP_REDIRECT:
+		stats->xdp_redirects++;
+		err = xdp_do_redirect(dev, xdp, xdp_prog);
+		if (err)
+			return VIRTNET_XDP_RES_DROP;
+
+		*xdp_xmit |= VIRTIO_XDP_REDIR;
+		return VIRTNET_XDP_RES_CONSUMED;
+
+	default:
+		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(dev, xdp_prog, act);
+		fallthrough;
+	case XDP_DROP:
+		return VIRTNET_XDP_RES_DROP;
+	}
+}
+
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
 	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
@@ -876,7 +938,6 @@  static struct sk_buff *receive_small(struct net_device *dev,
 	struct page *page = virt_to_head_page(buf);
 	unsigned int delta = 0;
 	struct page *xdp_page;
-	int err;
 	unsigned int metasize = 0;
 
 	len -= vi->hdr_len;
@@ -898,7 +959,6 @@  static struct sk_buff *receive_small(struct net_device *dev,
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
 		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
-		struct xdp_frame *xdpf;
 		struct xdp_buff xdp;
 		void *orig_data;
 		u32 act;
@@ -931,46 +991,22 @@  static struct sk_buff *receive_small(struct net_device *dev,
 		xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
 				 xdp_headroom, len, true);
 		orig_data = xdp.data;
-		act = bpf_prog_run_xdp(xdp_prog, &xdp);
-		stats->xdp_packets++;
+
+		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
 
 		switch (act) {
-		case XDP_PASS:
+		case VIRTNET_XDP_RES_PASS:
 			/* Recalculate length in case bpf program changed it */
 			delta = orig_data - xdp.data;
 			len = xdp.data_end - xdp.data;
 			metasize = xdp.data - xdp.data_meta;
 			break;
-		case XDP_TX:
-			stats->xdp_tx++;
-			xdpf = xdp_convert_buff_to_frame(&xdp);
-			if (unlikely(!xdpf))
-				goto err_xdp;
-			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-			if (unlikely(!err)) {
-				xdp_return_frame_rx_napi(xdpf);
-			} else if (unlikely(err < 0)) {
-				trace_xdp_exception(vi->dev, xdp_prog, act);
-				goto err_xdp;
-			}
-			*xdp_xmit |= VIRTIO_XDP_TX;
-			rcu_read_unlock();
-			goto xdp_xmit;
-		case XDP_REDIRECT:
-			stats->xdp_redirects++;
-			err = xdp_do_redirect(dev, &xdp, xdp_prog);
-			if (err)
-				goto err_xdp;
-			*xdp_xmit |= VIRTIO_XDP_REDIR;
+
+		case VIRTNET_XDP_RES_CONSUMED:
 			rcu_read_unlock();
 			goto xdp_xmit;
-		default:
-			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
-			fallthrough;
-		case XDP_ABORTED:
-			trace_xdp_exception(vi->dev, xdp_prog, act);
-			goto err_xdp;
-		case XDP_DROP:
+
+		case VIRTNET_XDP_RES_DROP:
 			goto err_xdp;
 		}
 	}
@@ -1277,7 +1313,6 @@  static struct sk_buff *receive_mergeable(struct net_device *dev,
 	if (xdp_prog) {
 		unsigned int xdp_frags_truesz = 0;
 		struct skb_shared_info *shinfo;
-		struct xdp_frame *xdpf;
 		struct page *xdp_page;
 		struct xdp_buff xdp;
 		void *data;
@@ -1294,49 +1329,22 @@  static struct sk_buff *receive_mergeable(struct net_device *dev,
 		if (unlikely(err))
 			goto err_xdp_frags;
 
-		act = bpf_prog_run_xdp(xdp_prog, &xdp);
-		stats->xdp_packets++;
+		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
 
 		switch (act) {
-		case XDP_PASS:
+		case VIRTNET_XDP_RES_PASS:
 			head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
 			if (unlikely(!head_skb))
 				goto err_xdp_frags;
 
 			rcu_read_unlock();
 			return head_skb;
-		case XDP_TX:
-			stats->xdp_tx++;
-			xdpf = xdp_convert_buff_to_frame(&xdp);
-			if (unlikely(!xdpf)) {
-				netdev_dbg(dev, "convert buff to frame failed for xdp\n");
-				goto err_xdp_frags;
-			}
-			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-			if (unlikely(!err)) {
-				xdp_return_frame_rx_napi(xdpf);
-			} else if (unlikely(err < 0)) {
-				trace_xdp_exception(vi->dev, xdp_prog, act);
-				goto err_xdp_frags;
-			}
-			*xdp_xmit |= VIRTIO_XDP_TX;
-			rcu_read_unlock();
-			goto xdp_xmit;
-		case XDP_REDIRECT:
-			stats->xdp_redirects++;
-			err = xdp_do_redirect(dev, &xdp, xdp_prog);
-			if (err)
-				goto err_xdp_frags;
-			*xdp_xmit |= VIRTIO_XDP_REDIR;
+
+		case VIRTNET_XDP_RES_CONSUMED:
 			rcu_read_unlock();
 			goto xdp_xmit;
-		default:
-			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
-			fallthrough;
-		case XDP_ABORTED:
-			trace_xdp_exception(vi->dev, xdp_prog, act);
-			fallthrough;
-		case XDP_DROP:
+
+		case VIRTNET_XDP_RES_DROP:
 			goto err_xdp_frags;
 		}
 err_xdp_frags: