@@ -1613,6 +1613,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
int opaque;
bool done;
+ sq->xsk.last_cpu = smp_processor_id();
+
if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
/* We don't need to enable cb for XDP */
napi_complete_done(napi, 0);
@@ -3197,6 +3199,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
+ .ndo_xsk_wakeup = virtnet_xsk_wakeup,
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
.ndo_set_features = virtnet_set_features,
@@ -174,6 +174,8 @@ struct send_queue {
struct xsk_buff_pool __rcu *pool;
dma_addr_t hdr_dma_address;
+
+ u32 last_cpu;
} xsk;
};
@@ -153,6 +153,59 @@ bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
return busy;
}
+static void xsk_remote_trigger_napi(void *info)
+{
+ struct send_queue *sq = info;
+
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+}
+
+static void virtnet_xsk_wakeup_sq(struct send_queue *sq, bool in_napi)
+{
+ u32 last_cpu, cur_cpu;
+
+ if (napi_if_scheduled_mark_missed(&sq->napi))
+ return;
+
+ last_cpu = sq->xsk.last_cpu;
+
+ cur_cpu = get_cpu();
+
+ /* On a remote cpu, softirq runs automatically when the IPI irq exits.
+ * On the local cpu, smp_call_function_single() does not raise an IPI,
+ * so softirq is not triggered by irq exit and NAPI must be scheduled here.
+ */
+ if (last_cpu == cur_cpu) {
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+
+ /* Not in softirq/irq context, so raise the tx NAPI softirq manually. */
+ if (!in_napi)
+ napi_tx_raise();
+ } else {
+ smp_call_function_single(last_cpu, xsk_remote_trigger_napi, sq, true);
+ }
+
+ put_cpu();
+}
+
+int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ sq = &vi->sq[qid];
+
+ virtnet_xsk_wakeup_sq(sq, false);
+
+ return 0;
+}
+
static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
struct xsk_buff_pool *pool, struct net_device *dev)
{
@@ -22,4 +22,5 @@ static inline u32 ptr_to_xsk(void *ptr)
int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
int budget);
+int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag);
#endif
xsk wakeup is used by the xsk framework or by user space to trigger the xsk
xmit logic. Virtio-net cannot actively generate an interrupt, so it tries to
trigger tx NAPI on the CPU that handles the tx interrupt. Consider the cache
effect: the tx interrupt is generally pinned to one CPU, so it is better to
schedule tx NAPI on that same CPU.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio/main.c       |  3 ++
 drivers/net/virtio/virtio_net.h |  2 ++
 drivers/net/virtio/xsk.c        | 53 +++++++++++++++++++++++++++++++++
 drivers/net/virtio/xsk.h        |  1 +
 4 files changed, 59 insertions(+)
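
For context, a minimal sketch (not part of this patch) of how the wakeup path
is exercised from user space: when the TX ring's need_wakeup flag is set, the
application kicks the kernel with a zero-length sendto() on the AF_XDP socket,
which the kernel routes through xsk_wakeup() into the driver's .ndo_xsk_wakeup,
i.e. virtnet_xsk_wakeup() here. The xsk_fd below is assumed to be an already
bound AF_XDP socket created with XDP_USE_NEED_WAKEUP.

	/* Hypothetical user-space TX kick; xsk_fd is assumed to be an AF_XDP
	 * socket bound with XDP_USE_NEED_WAKEUP. A zero-length sendto() is the
	 * documented way to request a TX wakeup; the kernel forwards it to the
	 * driver's .ndo_xsk_wakeup (virtnet_xsk_wakeup in this series).
	 */
	#include <sys/socket.h>
	#include <errno.h>

	static int kick_tx(int xsk_fd)
	{
		int ret = sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);

		/* These errnos are transient and simply mean "try again later". */
		if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
			return 0;
		return -errno;
	}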