diff mbox series

[net-next,09/12] mlxsw: pci: Initialize XDP Rx queue info per RDQ

Message ID 56b84bd23f1745fad0547b62e0da17b656fd3f4c.1738665783.git.petrm@nvidia.com (mailing list archive)
State New
Delegated to: Netdev Maintainers
Headers show
Series mlxsw: Preparations for XDP support | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 12 of 12 maintainers
netdev/build_clang success Errors and warnings before: 2 this patch: 2
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 56 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2025-02-04--15-00 (tests: 886)

Commit Message

Petr Machata Feb. 4, 2025, 11:05 a.m. UTC
From: Amit Cohen <amcohen@nvidia.com>

In preparation for XDP support, register an Rx queue info structure for
each receive queue.

Each Rx queue is used by multiple net devices, so pass a dummy net device
(unregistered, 0 ifindex) as the device.

Pass a queue index of 0 since the net devices are registered by the
driver as single-queue devices.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index bd6c772a3384..b102be38d29d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -14,6 +14,7 @@ 
 #include <linux/log2.h>
 #include <linux/string.h>
 #include <net/page_pool/helpers.h>
+#include <net/xdp.h>
 
 #include "pci_hw.h"
 #include "pci.h"
@@ -93,6 +94,7 @@  struct mlxsw_pci_queue {
 		} eq;
 		struct {
 			struct mlxsw_pci_queue *cq;
+			struct xdp_rxq_info xdp_rxq;
 		} rdq;
 	} u;
 };
@@ -624,6 +626,11 @@  static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	cq->u.cq.dq = q;
 	q->u.rdq.cq = cq;
 
+	err = __xdp_rxq_info_reg(&q->u.rdq.xdp_rxq, mlxsw_pci->napi_dev_rx, 0,
+				 cq->u.cq.napi.napi_id, PAGE_SIZE);
+	if (err)
+		goto err_xdp_rxq_info_reg;
+
 	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
 
 	for (i = 0; i < q->count; i++) {
@@ -633,7 +640,7 @@  static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 		for (j = 0; j < mlxsw_pci->num_sg_entries; j++) {
 			err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
 			if (err)
-				goto rollback;
+				goto err_rdq_page_alloc;
 		}
 		/* Everything is set up, ring doorbell to pass elem to HW */
 		q->producer_counter++;
@@ -642,13 +649,15 @@  static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 
 	return 0;
 
-rollback:
+err_rdq_page_alloc:
 	for (i--; i >= 0; i--) {
 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
 		for (j--; j >= 0; j--)
 			mlxsw_pci_rdq_page_free(q, elem_info, j);
 		j = mlxsw_pci->num_sg_entries;
 	}
+	xdp_rxq_info_unreg(&q->u.rdq.xdp_rxq);
+err_xdp_rxq_info_reg:
 	q->u.rdq.cq = NULL;
 	cq->u.cq.dq = NULL;
 	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
@@ -663,6 +672,7 @@  static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
 	int i, j;
 
 	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+	xdp_rxq_info_unreg(&q->u.rdq.xdp_rxq);
 	for (i = 0; i < q->count; i++) {
 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
 		for (j = 0; j < mlxsw_pci->num_sg_entries; j++)