@@ -1650,7 +1650,8 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
 			      IFF_LIVE_ADDR_CHANGE;
 
 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
-			       NETDEV_XDP_ACT_REDIRECT;
+			       NETDEV_XDP_ACT_REDIRECT |
+			       NETDEV_XDP_ACT_NDO_XMIT;
 
 	return 0;
 }
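Advertising NETDEV_XDP_ACT_NDO_XMIT is what lets the XDP core accept this
device as a redirect target. A minimal sketch of the gating the core applies
on enqueue, paraphrased from the kernel's devmap logic (the function name is
illustrative, not part of this patch):

	/* paraphrased sketch of the core's devmap enqueue gating */
	static int example_redirect_target_ok(const struct net_device *dev)
	{
		if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
			return -EOPNOTSUPP;	/* not a valid redirect target */
		return 0;
	}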
@@ -2844,6 +2845,7 @@ static const struct net_device_ops ionic_netdev_ops = {
 	.ndo_eth_ioctl		= ionic_eth_ioctl,
 	.ndo_start_xmit		= ionic_start_xmit,
 	.ndo_bpf		= ionic_xdp,
+	.ndo_xdp_xmit		= ionic_xdp_xmit,
 	.ndo_get_stats64	= ionic_get_stats64,
 	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
 	.ndo_set_features	= ionic_set_features,
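With the ndo wired up, a redirect-capable XDP program on another interface can
target the ionic device. A minimal sketch of such a program (the map layout and
ifindex slot are assumptions for illustration, not part of this patch):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* devmap whose slot 0 is assumed to hold the ionic netdev's ifindex */
	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	} tx_port SEC(".maps");

	SEC("xdp")
	int xdp_fwd_to_ionic(struct xdp_md *ctx)
	{
		/* frames redirected here arrive at ionic_xdp_xmit() below */
		return bpf_redirect_map(&tx_port, 0, 0);
	}

	char _license[] SEC("license") = "GPL";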
@@ -316,9 +316,15 @@ static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
 	buf_info = desc_info->bufs;
 	dma_unmap_single(dev, buf_info->dma_addr,
 			 buf_info->len, DMA_TO_DEVICE);
-	__free_pages(buf_info->page, 0);
+	/* an XDP_TX buffer is a local rx page the driver owns and frees */
+	if (desc_info->act == XDP_TX)
+		__free_pages(buf_info->page, 0);
 	buf_info->page = NULL;
 
+	/* a redirected frame goes back to its source driver via the XDP core */
+	if (desc_info->act == XDP_REDIRECT)
+		xdp_return_frame(desc_info->xdpf);
+
 	desc_info->nbufs = 0;
 	desc_info->xdpf = NULL;
 	desc_info->act = 0;
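For context on why xdp_return_frame() is the right call here: a redirected
frame was built by the source driver's rx path, roughly as sketched below, and
must be handed back through the XDP core so its original memory model (page
pool, etc.) reclaims the buffer. The function name is illustrative only:

	/* illustrative source-driver rx verdict handling, not from this patch */
	static void example_rx_verdict(struct net_device *dev,
				       struct xdp_buff *xdp,
				       struct bpf_prog *prog)
	{
		if (bpf_prog_run_xdp(prog, xdp) == XDP_REDIRECT &&
		    !xdp_do_redirect(dev, xdp, prog))
			return;	/* frame now owned by the XDP core */
		/* otherwise drop or pass up the stack as appropriate */
	}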
@@ -372,6 +376,66 @@ static int ionic_xdp_post_frame(struct net_device *netdev,
 	return 0;
 }
 
+int ionic_xdp_xmit(struct net_device *netdev, int n,
+		   struct xdp_frame **xdp_frames, u32 flags)
+{
+	struct ionic_lif *lif = netdev_priv(netdev);
+	struct ionic_queue *txq;
+	struct netdev_queue *nq;
+	int nxmit;
+	int space;
+	int cpu;
+	int qi;
+
+	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
+		return -ENETDOWN;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	/* AdminQ is assumed on cpu 0, while we attempt to affinitize the
+	 * TxRx queue pairs 0..n-1 on cpus 1..n. We try to keep with that
+	 * affinitization here, but of course irqbalance and friends might
+	 * have juggled things anyway, so we have to check for the 0 case.
+	 */
+	cpu = smp_processor_id();
+	qi = cpu ? (cpu - 1) % lif->nxqs : cpu;
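+	/* e.g. with nxqs == 4: cpu 1 -> q0, cpu 2 -> q1, cpu 5 -> q0
+	 * (wraps), and a stray cpu 0 shares q0 with cpu 1.
+	 */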
+
+	txq = &lif->txqcqs[qi]->q;
+	nq = netdev_get_tx_queue(netdev, txq->index);
+	__netif_tx_lock(nq, cpu);
+	/* refresh trans_start so the tx watchdog doesn't fire spuriously */
+	txq_trans_cond_update(nq);
+
+	if (netif_tx_queue_stopped(nq) ||
+	    unlikely(ionic_maybe_stop_tx(txq, 1))) {
+		__netif_tx_unlock(nq);
+		return -EIO;
+	}
+
+	space = min_t(int, n, ionic_q_space_avail(txq));
+	for (nxmit = 0; nxmit < space; nxmit++) {
+		/* stop on first failure; the caller frees the rest */
+		if (ionic_xdp_post_frame(netdev, txq, xdp_frames[nxmit],
+					 XDP_REDIRECT,
+					 virt_to_page(xdp_frames[nxmit]->data),
+					 0, false))
+			break;
+	}
+
+	if (flags & XDP_XMIT_FLUSH)
+		ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
+				 txq->dbval | txq->head_idx);
+
+	ionic_maybe_stop_tx(txq, 4);
+	__netif_tx_unlock(nq);
+
+	return nxmit;
+}
+
 static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 			  struct net_device *netdev,
 			  struct ionic_queue *rxq,
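The return-value contract matters in the loop above: ndo_xdp_xmit must report
exactly how many frames it consumed, because the core frees the remainder
itself (a decrement on failure would under-report and leave one posted frame
to be freed twice). A simplified sketch of the caller side, paraphrasing the
kernel's devmap flush logic (the function name is illustrative):

	/* illustrative caller-side handling, paraphrased from the XDP core */
	static void example_flush(struct net_device *dev,
				  struct xdp_frame **frames, int cnt)
	{
		int sent, i;

		sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, frames,
						     XDP_XMIT_FLUSH);
		if (sent < 0)
			sent = 0;	/* error: nothing was consumed */
		for (i = sent; i < cnt; i++)
			xdp_return_frame_rx_napi(frames[i]);	/* free the rest */
	}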
@@ -17,4 +17,5 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
 bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
 bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags);
 
 #endif /* _IONIC_TXRX_H_ */