@@ -176,6 +176,8 @@ struct send_queue {
dma_addr_t hdr_dma_address;
u32 last_cpu;
+
+ bool need_wakeup;
} xsk;
};
@@ -296,8 +298,7 @@ static void __free_old_xmit(struct send_queue *sq, bool in_napi,
stats->packets++;
}
- if (xsknum)
- xsk_tx_completed(sq->xsk.pool, xsknum);
+ virtnet_xsk_complete(sq, xsknum, in_napi);
}
int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
@@ -116,6 +116,7 @@ bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
bool busy;
int ret;
+ sq->xsk.need_wakeup = false;
__free_old_xmit(sq, true, &stats);
if (xsk_uses_need_wakeup(pool))
@@ -138,6 +139,13 @@ bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
* triggered by interrupt.
*/
busy = false;
+
+ /* The tx poll may not be triggered by a tx interrupt: start_xmit()
+ * and the rx poll also free old xmit buffers, so no tx interrupt
+ * may be generated. Set need_wakeup so that the tx poll can be
+ * kicked from __free_old_xmit() via virtnet_xsk_complete().
+ */
+ sq->xsk.need_wakeup = true;
break;
}
@@ -206,6 +214,26 @@ int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
return 0;
}
+void virtnet_xsk_complete(struct send_queue *sq, u32 num, bool in_napi)
+{
+ struct xsk_buff_pool *pool;
+
+ rcu_read_lock();
+
+ pool = rcu_dereference(sq->xsk.pool);
+ if (pool) {
+ if (num)
+ xsk_tx_completed(pool, num);
+
+ if (sq->xsk.need_wakeup) {
+ sq->xsk.need_wakeup = false;
+ virtnet_xsk_wakeup_sq(sq, in_napi);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
struct xsk_buff_pool *pool, struct net_device *dev)
{
@@ -298,6 +326,8 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
if (err)
goto err_rxq;
+ sq->xsk.need_wakeup = false;
+
/* Here is already protected by rtnl_lock, so rcu_assign_pointer
* is safe.
*/
@@ -19,6 +19,7 @@ static inline u32 ptr_to_xsk(void *ptr)
return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
}
+void virtnet_xsk_complete(struct send_queue *sq, u32 num, bool in_napi);
int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
int budget);
If the XSK xmit stops because the TX queue is full, it then waits for a
TX interrupt to trigger the follow-up work. But in virtio-net, recycling
old buffers is not done only in the tx napi poll; it also happens in
start_xmit(), the rx poll and other places. So when xsk xmit has stopped
on a full tx queue, __free_old_xmit() will try to wake up the tx napi.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio/virtio_net.h |  5 +++--
 drivers/net/virtio/xsk.c        | 30 ++++++++++++++++++++++++++++++
 drivers/net/virtio/xsk.h        |  1 +
 3 files changed, 34 insertions(+), 2 deletions(-)
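
Note: virtnet_xsk_complete() calls virtnet_xsk_wakeup_sq(), which is not
part of this diff. A minimal sketch of what that helper is assumed to do,
built on the driver's existing napi helpers (napi_if_scheduled_mark_missed()
and virtqueue_napi_schedule()); this is an illustration, not the code from
the series:

/* Hypothetical sketch, not part of this patch: wake the tx napi of the
 * queue so that the tx poll runs virtnet_xsk_xmit() again. If the napi
 * is already scheduled, just mark a missed run and let it repoll.
 */
static void virtnet_xsk_wakeup_sq(struct send_queue *sq, bool in_napi)
{
	if (napi_if_scheduled_mark_missed(&sq->napi))
		return;

	if (in_napi) {
		/* Already in napi/softirq context: schedule directly. */
		virtqueue_napi_schedule(&sq->napi, sq->vq);
	} else {
		local_bh_disable();
		virtqueue_napi_schedule(&sq->napi, sq->vq);
		local_bh_enable();
	}
}

With this in place, the wakeup path on a previously full tx queue is:
start_xmit()/rx poll -> __free_old_xmit() -> virtnet_xsk_complete() ->
virtnet_xsk_wakeup_sq() -> tx napi -> virtnet_xsk_xmit().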