@@ -1104,7 +1104,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+ u16 qidx, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
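The first hunk widens otx2_xdp_sq_append_pkt() with a flags argument so that the TX completion handler can later tell how a frame entered the send queue. The values passed through it in this patch are plain XDP verdicts; for reference, the relevant enum lives in include/uapi/linux/bpf.h and is not part of the patch:

/* Reproduced for reference only: the verdicts that end up in the new
 * flags argument are values of enum xdp_action.
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,		/* = 3, frame bounced back out the same port */
	XDP_REDIRECT,	/* = 4, frame forwarded to another device/queue */
};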
@@ -2693,11 +2693,15 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len,
+ qidx, XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
page = virt_to_page(xdpf->data);
- put_page(page);
+ if (page->pp)
+ page_pool_recycle_direct(page->pp, page);
+ else
+ put_page(page);
return -ENOMEM;
}
return 0;
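In this ndo_xdp_xmit() error path, a redirected frame may sit in a page that belongs to a page pool (page->pp is non-NULL for pool-owned pages); such a page has to go back to its pool rather than through put_page(), or the pool's in-flight accounting leaks. A minimal sketch of that policy as a standalone helper; otx2_xdp_put_page() is a hypothetical name, not something the patch adds:

/* Hypothetical helper (not in the patch): recycle pool-owned pages to
 * their pool, release anything else as an ordinary refcounted page.
 */
static void otx2_xdp_put_page(struct page *page)
{
	if (page->pp)
		page_pool_recycle_direct(page->pp, page);
	else
		put_page(page);
}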
@@ -102,19 +102,20 @@ static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
struct page *page;
- u64 pa;
+	u64 pa, iova;

	sg = &sq->sg[snd_comp->sqe_id];

- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
+ iova = sg->dma_addr[0] - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
+ if (sg->flags & XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+
if (page->pp) {
page_pool_recycle_direct(page->pp, page);
return;
}
-
put_page(page);
}

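The completion handler now separates two buffer lifetimes: an XDP_TX buffer is an RX buffer whose DMA mapping is owned by the page pool, so it is only recycled, while an XDP_REDIRECT buffer was mapped by the driver itself in otx2_xdp_xmit_tx() and needs an explicit unmap first. The sg->dma_addr[0] programmed into the SQE points OTX2_HEAD_ROOM bytes past the buffer start, so stepping back recovers the base IOVA before the IOVA-to-physical translation. Note that sg->flags & XDP_REDIRECT treats enum xdp_action values as bit flags; this works only because XDP_TX (3) and XDP_REDIRECT (4) happen to share no bits, and dedicated flag bits would be sturdier. A standalone userspace check of that assumption (not part of the patch):

/* Sanity check of the bit-test assumption above: XDP_TX is 3 (0b011)
 * and XDP_REDIRECT is 4 (0b100), so (flags & XDP_REDIRECT) is nonzero
 * only for redirected frames, even though enum xdp_action values were
 * never designed as flag bits.
 */
#include <assert.h>

int main(void)
{
	enum { XDP_TX = 3, XDP_REDIRECT = 4 };	/* mirrors enum xdp_action */

	assert((XDP_TX & XDP_REDIRECT) == 0);
	assert((XDP_REDIRECT & XDP_REDIRECT) != 0);
	return 0;
}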
@@ -1379,7 +1380,7 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}

static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+ int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1396,9 +1397,11 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
}

-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+ u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
@@ -1424,7 +1427,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
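For the single-segment SQEs built here, the descriptor is a send header followed by one SG subdescriptor and one IOVA, and sizem1 tells the hardware the descriptor's size in 16-byte units minus one. A sketch of the arithmetic, assuming the usual 16-byte nix_sqe_hdr_s and 8-byte SG subdescriptor layouts (these sizes are not restated in the patch itself):

/* Illustrative only, under the size assumptions above. */
int offset = 16 + 8 + 8;	/* send header + SG subdesc + one IOVA = 32 */
int sizem1 = (offset / 16) - 1;	/* 32 bytes -> two 16B units -> sizem1 = 1 */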
@@ -1477,8 +1480,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ return otx2_xdp_sq_append_pkt(pfvf, cqe->sg.seg_addr,
+ cqe->sg.seg_size, qidx, XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
if (xsk_buff) {
@@ -1497,7 +1500,14 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
*need_xdp_flush = true;
return true;
}
- page_pool_recycle_direct(pool->page_pool, page);
+ if (page->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ return false;
+ }
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
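When xdp_do_redirect() fails, the frame is dropped by hand and the same ownership split applies: a pool-backed page is recycled with its mapping intact, while a non-pool page carries a driver-made RX mapping that must be torn down before the reference goes. The same logic, condensed into a hypothetical helper for clarity (otx2_xdp_drop_rx_buf() is not a name the patch introduces):

/* Hypothetical consolidation (not in the patch) of the drop path. */
static void otx2_xdp_drop_rx_buf(struct otx2_nic *pfvf,
				 struct page *page, u64 iova)
{
	if (page->pp) {
		/* Pool-owned: the DMA mapping stays with the pool. */
		page_pool_recycle_direct(page->pp, page);
		return;
	}

	/* Driver-mapped RX buffer: unmap, then drop the reference. */
	otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
	put_page(page);
}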
@@ -77,6 +77,7 @@ struct otx2_rcv_queue {

struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
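Finally, the new u16 flags member lands in alignment padding that already sits between num_segs and the u64 members, so sizeof(struct sg_list) should be unchanged on the usual 64-bit ABIs. A standalone illustration of that layout point (plain C, not driver code):

/* On typical LP64 ABIs both structs are the same size: the extra u16
 * occupies what used to be padding before the first u64.
 */
#include <stdint.h>
#include <stdio.h>

struct sg_old { uint16_t num_segs; uint64_t skb; };
struct sg_new { uint16_t num_segs; uint16_t flags; uint64_t skb; };

int main(void)
{
	printf("%zu %zu\n", sizeof(struct sg_old), sizeof(struct sg_new));
	return 0;
}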