[net-next,11/15] net/mlx5e: Support multi buffer XDP_TX

Message ID 20220318205248.33367-12-saeed@kernel.org (mailing list archive)
State Accepted
Commit a48ad58cec18702249a228267dec29d105bbe5a5
Delegated to: Netdev Maintainers
Series [net-next,01/15] net/mlx5e: Prepare non-linear legacy RQ for XDP multi buffer support

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Pull request is its own cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 6 maintainers not CCed: daniel@iogearbox.net ast@kernel.org hawk@kernel.org john.fastabend@gmail.com pabeni@redhat.com bpf@vger.kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 84 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Saeed Mahameed March 18, 2022, 8:52 p.m. UTC
From: Maxim Mikityanskiy <maximmi@nvidia.com>

This commit enables passing multi-buffer XDP frames to the TX handlers
on the XDP_TX verdict. Fragments are DMA-synchronized to the device and
queued to the xdpi_fifo for subsequent unmapping.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/en/xdp.c  | 39 +++++++++++++++----
 1 file changed, 31 insertions(+), 8 deletions(-)
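
The core of the patch below is the per-fragment DMA sync on the XDP_TX
path. As a rough, self-contained sketch of that pattern (not the mlx5e
code itself): the helper name xdp_frame_sync_for_device is hypothetical,
and it assumes the frame and all of its fragments sit in page_pool pages
that were mapped for DMA at allocation time, so page_pool_get_dma_addr()
returns a valid device address.

/*
 * Rough sketch only (hypothetical helper, not part of mlx5e): sync the
 * linear part and every fragment of an xdp_frame for device DMA.
 * Assumes the frame sits at the start of a page_pool page mapped for
 * DMA at allocation time, as is the case for a driver-built xdp_buff
 * converted with xdp_convert_buff_to_frame().
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/xdp.h>

static void xdp_frame_sync_for_device(struct device *dev, struct page *page,
				      struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	dma_addr_t dma_addr;
	int i;

	/* Linear part: same offset computation as the patch uses,
	 * "xdpf->data - (void *)xdpf" within the frame's own page.
	 */
	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
	dma_sync_single_for_device(dev, dma_addr, xdpf->len, DMA_TO_DEVICE);

	if (!xdp_frame_has_frags(xdpf))
		return;

	/* Fragments: one sync per skb_frag_t, as in the loop the patch adds. */
	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];

		dma_sync_single_for_device(dev,
					   page_pool_get_dma_addr(skb_frag_page(frag)) +
					   skb_frag_off(frag),
					   skb_frag_size(frag),
					   DMA_TO_DEVICE);
	}
}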

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 368e54949614..f35b62ce4c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -59,20 +59,17 @@  static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		    struct page *page, struct xdp_buff *xdp)
 {
+	struct skb_shared_info *sinfo = NULL;
 	struct mlx5e_xmit_data xdptxd;
 	struct mlx5e_xdp_info xdpi;
 	struct xdp_frame *xdpf;
 	dma_addr_t dma_addr;
+	int i;
 
 	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf))
 		return false;
 
-	if (unlikely(xdp_frame_has_frags(xdpf))) {
-		xdp_return_frame(xdpf);
-		return false;
-	}
-
 	xdptxd.data = xdpf->data;
 	xdptxd.len  = xdpf->len;
 
@@ -117,19 +114,45 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	 */
 
 	xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+	xdpi.page.rq = rq;
 
 	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
 	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
 
+	if (unlikely(xdp_frame_has_frags(xdpf))) {
+		sinfo = xdp_get_shared_info_from_frame(xdpf);
+
+		for (i = 0; i < sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &sinfo->frags[i];
+			dma_addr_t addr;
+			u32 len;
+
+			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+				skb_frag_off(frag);
+			len = skb_frag_size(frag);
+			dma_sync_single_for_device(sq->pdev, addr, len,
+						   DMA_TO_DEVICE);
+		}
+	}
+
 	xdptxd.dma_addr = dma_addr;
-	xdpi.page.rq = rq;
-	xdpi.page.page = page;
 
 	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
 		return false;
 
+	xdpi.page.page = page;
 	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+
+	if (unlikely(xdp_frame_has_frags(xdpf))) {
+		for (i = 0; i < sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &sinfo->frags[i];
+
+			xdpi.page.page = skb_frag_page(frag);
+			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+		}
+	}
+
 	return true;
 }
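
After the frame is queued, the patch pushes one xdpi entry per page: the
head page first, then one per fragment page. The completion side later
consumes these entries to release the pages (the commit message's
"subsequent unmapping"), so every page needs its own record. A loose,
hypothetical illustration of that invariant follows; tx_page_ref and
tx_complete_pages() are made up for this note and assume page_pool-backed
pages, they are not mlx5e structures.

/* Hypothetical illustration, not mlx5e code: if completion only sees a
 * FIFO of per-page records, every page of a multi-buffer frame needs
 * its own entry so it can be returned to its pool after transmission.
 */
#include <net/page_pool.h>

struct tx_page_ref {
	struct page_pool *pool;
	struct page *page;
};

static void tx_complete_pages(struct tx_page_ref *refs, unsigned int n)
{
	unsigned int i;

	/* Release each recorded page; allow_direct=false because this
	 * may run outside the pool's NAPI context.
	 */
	for (i = 0; i < n; i++)
		page_pool_put_full_page(refs[i].pool, refs[i].page, false);
}

Note that the patch records the RQ (xdpi.page.rq) rather than a pool
pointer, so the actual release presumably goes through the RQ's page
handling instead of a direct page_pool call as sketched above.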