diff mbox series

[net-next,05/12] mlxsw: pci: Add a separate function for syncing buffers for CPU

Message ID 7674318d47d36fb91a64351ca64a491ec61d5284.1738665783.git.petrm@nvidia.com (mailing list archive)
State New
Delegated to: Netdev Maintainers
Headers show
Series mlxsw: Preparations for XDP support | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 8 of 8 maintainers
netdev/build_clang success Errors and warnings before: 2 this patch: 2
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 63 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2025-02-04--15-00 (tests: 886)

Commit Message

Petr Machata Feb. 4, 2025, 11:05 a.m. UTC
From: Amit Cohen <amcohen@nvidia.com>

Currently, syncing for the CPU is done as part of building the SKB. When XDP
is supported, such syncing should be done earlier, before creating the XDP
buffer. Add a function for syncing buffers for the CPU and call it early in
mlxsw_pci_cqe_rdq_handle(), as in a future patch the driver will handle XDP
there.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 30 +++++++++++++++++------
 1 file changed, 22 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 374b3f2f117d..5796d836a7ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -433,22 +433,34 @@  mlxsw_pci_rx_pkt_info_init(const struct mlxsw_pci *pci,
 	return 0;
 }
 
+static void
+mlxsw_pci_sync_for_cpu(const struct mlxsw_pci_queue *q,
+		       const struct mlxsw_pci_rx_pkt_info *rx_pkt_info)
+{
+	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+	struct page_pool *page_pool;
+	int i;
+
+	page_pool = cq->u.cq.page_pool;
+
+	for (i = 0; i < rx_pkt_info->num_sg_entries; i++) {
+		u32 offset = i ? 0 : MLXSW_PCI_SKB_HEADROOM;
+
+		page_pool_dma_sync_for_cpu(page_pool, rx_pkt_info->pages[i],
+					   offset,
+					   rx_pkt_info->sg_entries_size[i]);
+	}
+}
+
 static struct sk_buff *
 mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
 			const struct mlxsw_pci_rx_pkt_info *rx_pkt_info)
 {
-	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
 	unsigned int linear_data_size;
-	struct page_pool *page_pool;
 	struct sk_buff *skb;
 	void *data;
 	int i;
 
-	linear_data_size = rx_pkt_info->sg_entries_size[0];
-	page_pool = cq->u.cq.page_pool;
-	page_pool_dma_sync_for_cpu(page_pool, rx_pkt_info->pages[0],
-				   MLXSW_PCI_SKB_HEADROOM, linear_data_size);
-
 	data = page_address(rx_pkt_info->pages[0]);
 	net_prefetch(data);
 
@@ -457,6 +469,7 @@  mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
 		return ERR_PTR(-ENOMEM);
 
 	skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
+	linear_data_size = rx_pkt_info->sg_entries_size[0];
 	skb_put(skb, linear_data_size);
 
 	if (rx_pkt_info->num_sg_entries == 1)
@@ -468,7 +481,6 @@  mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
 
 		page = rx_pkt_info->pages[i];
 		frag_size = rx_pkt_info->sg_entries_size[i];
-		page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				page, 0, frag_size, PAGE_SIZE);
 	}
@@ -784,6 +796,8 @@  static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	if (err)
 		goto out;
 
+	mlxsw_pci_sync_for_cpu(q, &rx_pkt_info);
+
 	err = mlxsw_pci_rdq_pages_alloc(q, elem_info,
 					rx_pkt_info.num_sg_entries);
 	if (err)