diff mbox series

[net-next,04/12] mlxsw: pci: Use mlxsw_pci_rx_pkt_info

Message ID d51ed1f65b666236e0caa197fd8669d88beba7cb.1738665783.git.petrm@nvidia.com (mailing list archive)
State New
Delegated to: Netdev Maintainers
Headers show
Series mlxsw: Preparations for XDP support | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 8 of 8 maintainers
netdev/build_clang success Errors and warnings before: 2 this patch: 2
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 121 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2025-02-04--15-00 (tests: 886)

Commit Message

Petr Machata Feb. 4, 2025, 11:04 a.m. UTC
From: Amit Cohen <amcohen@nvidia.com>

Pass the newly added structure as an argument for mlxsw_pci_rdq_build_skb()
and use it.

Remove mlxsw_pci_elem_info_pages_ref_store(), as mlxsw_pci_rx_pkt_info
stores pointers to pages.

Pass to mlxsw_pci_rdq_pages_alloc() the number of scatter/gather entries,
which is stored in mlxsw_pci_rx_pkt_info.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 65 ++++++-----------------
 1 file changed, 16 insertions(+), 49 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index aca1857a4e70..374b3f2f117d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -433,28 +433,23 @@  mlxsw_pci_rx_pkt_info_init(const struct mlxsw_pci *pci,
 	return 0;
 }
 
-static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
-					       struct page *pages[],
-					       u16 byte_count)
+static struct sk_buff *
+mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
+			const struct mlxsw_pci_rx_pkt_info *rx_pkt_info)
 {
 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
 	unsigned int linear_data_size;
 	struct page_pool *page_pool;
 	struct sk_buff *skb;
-	int page_index = 0;
-	bool linear_only;
 	void *data;
+	int i;
 
-	linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
-	linear_data_size = linear_only ? byte_count :
-					 PAGE_SIZE -
-					 MLXSW_PCI_RX_BUF_SW_OVERHEAD;
-
+	linear_data_size = rx_pkt_info->sg_entries_size[0];
 	page_pool = cq->u.cq.page_pool;
-	page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
+	page_pool_dma_sync_for_cpu(page_pool, rx_pkt_info->pages[0],
 				   MLXSW_PCI_SKB_HEADROOM, linear_data_size);
 
-	data = page_address(pages[page_index]);
+	data = page_address(rx_pkt_info->pages[0]);
 	net_prefetch(data);
 
 	skb = napi_build_skb(data, PAGE_SIZE);
@@ -464,23 +459,18 @@  static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
 	skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
 	skb_put(skb, linear_data_size);
 
-	if (linear_only)
+	if (rx_pkt_info->num_sg_entries == 1)
 		return skb;
 
-	byte_count -= linear_data_size;
-	page_index++;
-
-	while (byte_count > 0) {
+	for (i = 1; i < rx_pkt_info->num_sg_entries; i++) {
 		unsigned int frag_size;
 		struct page *page;
 
-		page = pages[page_index];
-		frag_size = min(byte_count, PAGE_SIZE);
+		page = rx_pkt_info->pages[i];
+		frag_size = rx_pkt_info->sg_entries_size[i];
 		page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				page, 0, frag_size, PAGE_SIZE);
-		byte_count -= frag_size;
-		page_index++;
 	}
 
 	return skb;
@@ -513,24 +503,6 @@  static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
 			   false);
 }
 
-static int
-mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
-				    const struct mlxsw_pci_queue_elem_info *el,
-				    u16 byte_count, struct page *pages[],
-				    u8 *p_num_sg_entries)
-{
-	u8 num_sg_entries;
-	int i;
-
-	num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
-
-	for (i = 0; i < num_sg_entries; i++)
-		pages[i] = el->pages[i];
-
-	*p_num_sg_entries = num_sg_entries;
-	return 0;
-}
-
 static int
 mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
 			  struct mlxsw_pci_queue_elem_info *elem_info,
@@ -780,11 +752,9 @@  static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 {
 	struct mlxsw_pci_rx_pkt_info rx_pkt_info = {};
 	struct pci_dev *pdev = mlxsw_pci->pdev;
-	struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
 	struct mlxsw_pci_queue_elem_info *elem_info;
 	struct mlxsw_rx_info rx_info = {};
 	struct sk_buff *skb;
-	u8 num_sg_entries;
 	u16 byte_count;
 	int err;
 
@@ -814,19 +784,16 @@  static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	if (err)
 		goto out;
 
-	err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
-						  pages, &num_sg_entries);
+	err = mlxsw_pci_rdq_pages_alloc(q, elem_info,
+					rx_pkt_info.num_sg_entries);
 	if (err)
 		goto out;
 
-	err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
-	if (err)
-		goto out;
-
-	skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
+	skb = mlxsw_pci_rdq_build_skb(q, &rx_pkt_info);
 	if (IS_ERR(skb)) {
 		dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
-		mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
+		mlxsw_pci_rdq_pages_recycle(q, rx_pkt_info.pages,
+					    rx_pkt_info.num_sg_entries);
 		goto out;
 	}