Message ID | 20210331071139.15473-8-xuanzhuo@linux.alibaba.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | virtio-net support xdp socket zero copy xmit |
Context | Check | Description |
---|---|---|
netdev/cover_letter | success | Link |
netdev/fixes_present | success | Link |
netdev/patch_count | success | Link |
netdev/tree_selection | success | Clearly marked for net-next |
netdev/subject_prefix | success | Link |
netdev/cc_maintainers | success | CCed 15 of 15 maintainers |
netdev/source_inline | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Link |
netdev/module_param | success | Was 0 now: 0 |
netdev/build_32bit | success | Errors and warnings before: 0 this patch: 0 |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/verify_fixes | success | Link |
netdev/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 45 lines checked |
netdev/build_allmodconfig_warn | success | Errors and warnings before: 0 this patch: 0 |
netdev/header_inline | success | Link |
On 2021/3/31 3:11 PM, Xuan Zhuo wrote:
> poll tx call virtnet_xsk_run, then the data in the xsk tx queue will be
> continuously consumed by napi.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>

I think we need to squash this into patch 4, it looks more like a bug fix to me.

> ---
>  drivers/net/virtio_net.c | 20 +++++++++++++++++---
>  1 file changed, 17 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index d7e95f55478d..fac7d0020013 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -264,6 +264,9 @@ struct padded_vnet_hdr {
>  	char padding[4];
>  };
>
> +static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
> +			   int budget, bool in_napi);
> +
>  static bool is_xdp_frame(void *ptr)
>  {
>  	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> @@ -1553,7 +1556,9 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>  	struct send_queue *sq = container_of(napi, struct send_queue, napi);
>  	struct virtnet_info *vi = sq->vq->vdev->priv;
>  	unsigned int index = vq2txq(sq->vq);
> +	struct xsk_buff_pool *pool;
>  	struct netdev_queue *txq;
> +	int work = 0;
>
>  	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
>  		/* We don't need to enable cb for XDP */
> @@ -1563,15 +1568,24 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>
>  	txq = netdev_get_tx_queue(vi->dev, index);
>  	__netif_tx_lock(txq, raw_smp_processor_id());
> -	free_old_xmit_skbs(sq, true);
> +	rcu_read_lock();
> +	pool = rcu_dereference(sq->xsk.pool);
> +	if (pool) {
> +		work = virtnet_xsk_run(sq, pool, budget, true);
> +		rcu_read_unlock();
> +	} else {
> +		rcu_read_unlock();
> +		free_old_xmit_skbs(sq, true);
> +	}
>  	__netif_tx_unlock(txq);
>
> -	virtqueue_napi_complete(napi, sq->vq, 0);
> +	if (work < budget)
> +		virtqueue_napi_complete(napi, sq->vq, 0);
>
>  	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
>  		netif_tx_wake_queue(txq);
>
> -	return 0;
> +	return work;

Need a separate patch to "fix" the budget returned by poll_tx here.

Thanks

>  }
>
>  static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
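For reference on the budget comment above: the usual NAPI convention is that a TX poll handler returns how much work it actually did and only completes NAPI when it used less than the full budget; returning the full budget asks the core to poll again. A minimal sketch of that pattern, not the virtio-net code itself (my_poll_tx() and my_clean_tx() are hypothetical names):

static int my_poll_tx(struct napi_struct *napi, int budget)
{
	/* Reclaim at most 'budget' completed TX descriptors. */
	int work = my_clean_tx(napi, budget);

	/* Only leave polling mode when there was budget to spare;
	 * returning work == budget tells the NAPI core to call us again.
	 */
	if (work < budget)
		napi_complete_done(napi, work);

	return work;
}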
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7e95f55478d..fac7d0020013 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -264,6 +264,9 @@ struct padded_vnet_hdr {
 	char padding[4];
 };
 
+static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
+			   int budget, bool in_napi);
+
 static bool is_xdp_frame(void *ptr)
 {
 	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -1553,7 +1556,9 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
+	struct xsk_buff_pool *pool;
 	struct netdev_queue *txq;
+	int work = 0;
 
 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
 		/* We don't need to enable cb for XDP */
@@ -1563,15 +1568,24 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq, true);
+	rcu_read_lock();
+	pool = rcu_dereference(sq->xsk.pool);
+	if (pool) {
+		work = virtnet_xsk_run(sq, pool, budget, true);
+		rcu_read_unlock();
+	} else {
+		rcu_read_unlock();
+		free_old_xmit_skbs(sq, true);
+	}
 	__netif_tx_unlock(txq);
 
-	virtqueue_napi_complete(napi, sq->vq, 0);
+	if (work < budget)
+		virtqueue_napi_complete(napi, sq->vq, 0);
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);
 
-	return 0;
+	return work;
 }
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
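The xsk pool lookup in the hunk above follows the standard RCU read-side pattern: the pointer is read with rcu_dereference() inside an rcu_read_lock()/rcu_read_unlock() section, so the writer can swap the pool out and free it only after a grace period. A generic sketch of that pattern under assumed names (my_pool and use_pool() are hypothetical, not part of this driver):

	struct xsk_buff_pool *pool;

	rcu_read_lock();
	/* my_pool is an RCU-protected pointer, published elsewhere
	 * with rcu_assign_pointer().
	 */
	pool = rcu_dereference(my_pool);
	if (pool)
		use_pool(pool);	/* safe to use until rcu_read_unlock() */
	rcu_read_unlock();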