[net-next,2/2] mlxsw: pci: Use fragmented buffers

Message ID ee38898c692e7f644a7f3ea4d33aeddb4dd917d2.1719321422.git.petrm@nvidia.com (mailing list archive)
State Accepted
Commit 36437f469d7e92635c8e07b63bd490f0c14c3cba
Delegated to: Netdev Maintainers
Headers show
Series mlxsw: Reduce memory footprint of mlxsw driver | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 842 this patch: 842
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 6 of 6 maintainers
netdev/build_clang success Errors and warnings before: 849 this patch: 849
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 849 this patch: 849
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 274 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-06-26--12-00 (tests: 664)

Commit Message

Petr Machata June 25, 2024, 1:47 p.m. UTC
From: Amit Cohen <amcohen@nvidia.com>

A WQE (Work Queue Element) includes 3 scatter/gather entries for buffers.
The buffer can be split into 3 parts; software should set the address and
byte count of each part.
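
As an illustration, a payload that spans all three entries could be
described roughly as below, using the driver's existing WQE accessors
(the dma_addr[]/len[] arrays are placeholders for clarity, not driver
fields):

	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) {
		/* Point entry i at its part of the buffer ... */
		mlxsw_pci_wqe_address_set(wqe, i, dma_addr[i]);
		/* ... and record how many bytes that part holds. */
		mlxsw_pci_wqe_byte_count_set(wqe, i, len[i]);
	}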

A previous patch set used a page pool to allocate buffers; to simplify
that change, we first used one contiguous buffer, allocated with
order > 0. This patch improves the page pool usage to allocate the exact
number of pages that are required for each packet.
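
The sizing is a simple round-up division, as in the sketch below, which
mirrors the driver's mlxsw_pci_num_sg_entries_get() helper (the byte
counts in the comment are illustrative assumptions):

	/* Number of pages (scatter/gather entries) needed for one packet. */
	num_pages = DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD,
				 PAGE_SIZE);
	/* With 4 KiB pages, a 1500-byte frame plus software overhead fits
	 * in a single page, while a ~10000-byte jumbo frame needs three.
	 */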

As part of init, fill WQE.address[x] and WQE.byte_count* with pages
allocated from the pool. Fill x entries according to the number of
scatter/gather entries required for the maximum packet size. When a
packet is received, check its actual size and replace only the pages that
were used. Reserve bytes for software overhead only as part of the first
entry.
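
A minimal sketch of the receive-side refill (identifiers follow the
patch below; error handling and the doorbell ring are omitted):

	num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
	for (i = 0; i < num_sg_entries; i++) {
		/* Hand the page holding packet data over to the SKB ... */
		pages[i] = elem_info->pages[i];
		/* ... and refill only this used entry from the page pool. */
		err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
	}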

This change also requires using fragmented SKBs; until now, the whole
buffer was in the linear part. Note that 'skb->truesize' is decreased for
small packets.
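
Roughly, the SKB is assembled as sketched below (mirroring
mlxsw_pci_rdq_build_skb() from the patch): the first page backs the
linear part and any remaining bytes are attached as page fragments.
'remaining' stands for the bytes left after the linear part:

	skb = napi_build_skb(page_address(pages[0]), PAGE_SIZE);
	skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
	skb_put(skb, linear_data_size);

	/* Attach the rest of the packet, one page per fragment. */
	for (i = 1; remaining > 0; i++) {
		frag_size = min(remaining, PAGE_SIZE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, pages[i], 0,
				frag_size, PAGE_SIZE);
		remaining -= frag_size;
	}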

For now, the maximum buffer size is 3 * PAGE_SIZE, which is enough.
Should the driver ever support a larger MTU, 'order' can be used to
allocate more than one page per scatter/gather entry.
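
Hypothetically, such a change would only need the pool's 'order'
parameter back, e.g. (bytes_per_sg_entry is a placeholder, not part of
this patch):

	/* Allocate higher-order pages if one scatter/gather entry must
	 * ever hold more than PAGE_SIZE bytes.
	 */
	pp_params.order = get_order(bytes_per_sg_entry);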

This change significantly reduces the memory consumption of the mlxsw
driver; the footprint is reduced by 26%.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 163 +++++++++++++++++-----
 1 file changed, 129 insertions(+), 34 deletions(-)

Patch

diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 0492013aca18..0320dabd1380 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -62,7 +62,7 @@  struct mlxsw_pci_mem_item {
 };
 
 struct mlxsw_pci_queue_elem_info {
-	struct page *page;
+	struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
 	char *elem; /* pointer to actual dma mapped element mem chunk */
 	struct {
 		struct sk_buff *skb;
@@ -350,7 +350,11 @@  mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page,
 	dma_addr_t mapaddr;
 
 	mapaddr = page_pool_get_dma_addr(page);
-	mapaddr += MLXSW_PCI_SKB_HEADROOM;
+
+	if (index == 0) {
+		mapaddr += MLXSW_PCI_SKB_HEADROOM;
+		frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+	}
 
 	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
 	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
@@ -385,29 +389,56 @@  static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
 	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
 }
 
-static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *page,
+static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
 					       u16 byte_count)
 {
-	void *data = page_address(page);
-	unsigned int allocated_size;
+	unsigned int linear_data_size;
 	struct sk_buff *skb;
+	int page_index = 0;
+	bool linear_only;
+	void *data;
 
+	data = page_address(pages[page_index]);
 	net_prefetch(data);
-	allocated_size = page_size(page);
-	skb = napi_build_skb(data, allocated_size);
+
+	skb = napi_build_skb(data, PAGE_SIZE);
 	if (unlikely(!skb))
 		return ERR_PTR(-ENOMEM);
 
+	linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
+	linear_data_size = linear_only ? byte_count :
+					 PAGE_SIZE -
+					 MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+
 	skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
-	skb_put(skb, byte_count);
+	skb_put(skb, linear_data_size);
+
+	if (linear_only)
+		return skb;
+
+	byte_count -= linear_data_size;
+	page_index++;
+
+	while (byte_count > 0) {
+		unsigned int frag_size;
+		struct page *page;
+
+		page = pages[page_index];
+		frag_size = min(byte_count, PAGE_SIZE);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				page, 0, frag_size, PAGE_SIZE);
+		byte_count -= frag_size;
+		page_index++;
+	}
+
 	return skb;
 }
 
 static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
-				    struct mlxsw_pci_queue_elem_info *elem_info)
+				    struct mlxsw_pci_queue_elem_info *elem_info,
+				    int index)
 {
 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
-	size_t buf_len = MLXSW_PORT_MAX_MTU;
 	char *wqe = elem_info->elem;
 	struct page *page;
 
@@ -415,17 +446,19 @@  static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, 0, buf_len);
-	elem_info->page = page;
+	mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
+	elem_info->pages[index] = page;
 	return 0;
 }
 
 static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
-				    struct mlxsw_pci_queue_elem_info *elem_info)
+				    struct mlxsw_pci_queue_elem_info *elem_info,
+				    int index)
 {
 	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
 
-	page_pool_put_page(cq->u.cq.page_pool, elem_info->page, -1, false);
+	page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1,
+			   false);
 }
 
 static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
@@ -434,6 +467,64 @@  static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
 			    PAGE_SIZE);
 }
 
+static int
+mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
+				    const struct mlxsw_pci_queue_elem_info *el,
+				    u16 byte_count, struct page *pages[],
+				    u8 *p_num_sg_entries)
+{
+	u8 num_sg_entries;
+	int i;
+
+	num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
+	if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries))
+		return -EINVAL;
+
+	for (i = 0; i < num_sg_entries; i++)
+		pages[i] = el->pages[i];
+
+	*p_num_sg_entries = num_sg_entries;
+	return 0;
+}
+
+static int
+mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
+			  struct mlxsw_pci_queue_elem_info *elem_info,
+			  u8 num_sg_entries)
+{
+	struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES];
+	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+	int i, err;
+
+	for (i = 0; i < num_sg_entries; i++) {
+		old_pages[i] = elem_info->pages[i];
+		err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
+		if (err) {
+			dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n");
+			goto err_page_alloc;
+		}
+	}
+
+	return 0;
+
+err_page_alloc:
+	for (i--; i >= 0; i--)
+		page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]);
+
+	return err;
+}
+
+static void
+mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[],
+			    u8 num_sg_entries)
+{
+	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+	int i;
+
+	for (i = 0; i < num_sg_entries; i++)
+		page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]);
+}
+
 static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			      struct mlxsw_pci_queue *q)
 {
@@ -441,7 +532,7 @@  static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	u8 sdq_count = mlxsw_pci->num_sdqs;
 	struct mlxsw_pci_queue *cq;
 	u8 cq_num;
-	int i;
+	int i, j;
 	int err;
 
 	q->producer_counter = 0;
@@ -472,9 +563,12 @@  static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	for (i = 0; i < q->count; i++) {
 		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
 		BUG_ON(!elem_info);
-		err = mlxsw_pci_rdq_page_alloc(q, elem_info);
-		if (err)
-			goto rollback;
+
+		for (j = 0; j < mlxsw_pci->num_sg_entries; j++) {
+			err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
+			if (err)
+				goto rollback;
+		}
 		/* Everything is set up, ring doorbell to pass elem to HW */
 		q->producer_counter++;
 		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -485,7 +579,9 @@  static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 rollback:
 	for (i--; i >= 0; i--) {
 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
-		mlxsw_pci_rdq_page_free(q, elem_info);
+		for (j--; j >= 0; j--)
+			mlxsw_pci_rdq_page_free(q, elem_info, j);
+		j = mlxsw_pci->num_sg_entries;
 	}
 	q->u.rdq.cq = NULL;
 	cq->u.cq.dq = NULL;
@@ -498,12 +594,13 @@  static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
 			       struct mlxsw_pci_queue *q)
 {
 	struct mlxsw_pci_queue_elem_info *elem_info;
-	int i;
+	int i, j;
 
 	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
 	for (i = 0; i < q->count; i++) {
 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
-		mlxsw_pci_rdq_page_free(q, elem_info);
+		for (j = 0; j < mlxsw_pci->num_sg_entries; j++)
+			mlxsw_pci_rdq_page_free(q, elem_info, j);
 	}
 }
 
@@ -637,11 +734,11 @@  static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
 {
 	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
 	struct mlxsw_pci_queue_elem_info *elem_info;
-	struct mlxsw_pci_queue *cq = q->u.rdq.cq;
 	struct mlxsw_rx_info rx_info = {};
 	struct sk_buff *skb;
-	struct page *page;
+	u8 num_sg_entries;
 	u16 byte_count;
 	int err;
 
@@ -654,18 +751,19 @@  static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
 		byte_count -= ETH_FCS_LEN;
 
-	page = elem_info->page;
+	err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
+						  pages, &num_sg_entries);
+	if (err)
+		goto out;
 
-	err = mlxsw_pci_rdq_page_alloc(q, elem_info);
-	if (err) {
-		dev_err_ratelimited(&pdev->dev, "Failed to alloc page\n");
+	err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
+	if (err)
 		goto out;
-	}
 
-	skb = mlxsw_pci_rdq_build_skb(page, byte_count);
+	skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
 	if (IS_ERR(skb)) {
 		dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
-		page_pool_recycle_direct(cq->u.cq.page_pool, page);
+		mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
 		goto out;
 	}
 
@@ -886,15 +984,12 @@  static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
 	struct page_pool_params pp_params = {};
 	struct mlxsw_pci *mlxsw_pci = q->pci;
 	struct page_pool *page_pool;
-	u32 max_pkt_size;
 
 	if (cq_type != MLXSW_PCI_CQ_RDQ)
 		return 0;
 
-	max_pkt_size = MLXSW_PORT_MAX_MTU + MLXSW_PCI_RX_BUF_SW_OVERHEAD;
-	pp_params.order = get_order(max_pkt_size);
 	pp_params.flags = PP_FLAG_DMA_MAP;
-	pp_params.pool_size = MLXSW_PCI_WQE_COUNT;
+	pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
 	pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
 	pp_params.dev = &mlxsw_pci->pdev->dev;
 	pp_params.napi = &q->u.cq.napi;