[net,4/8] net/mlx5: HWS, use lock classes for bwc locks

Message ID 20241015093208.197603-5-tariqt@nvidia.com (mailing list archive)
State Accepted
Commit 9addffa3435973e016f066e13f950be5eed73c06
Delegated to: Netdev Maintainers
Series mlx5 misc fixes 2024-10-15

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5 this patch: 5
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 2 maintainers not CCed: andrew+netdev@lunn.ch linux-rdma@vger.kernel.org
netdev/build_clang success Errors and warnings before: 3 this patch: 3
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 4 this patch: 4
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 43 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Tariq Toukan Oct. 15, 2024, 9:32 a.m. UTC
From: Cosmin Ratiu <cratiu@nvidia.com>

The HWS BWC API uses one lock per queue and usually acquires one of
them, except when making changes that require locking all queues in
order. Naturally, lockdep isn't too happy about acquiring the same lock
class multiple times, so inform it that each queue lock is a different
class to avoid false positives.
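
For context, a minimal sketch of the per-instance lock class idiom described above (illustrative only, not the mlx5 code; all demo_* names are hypothetical). mutex_init() keys every mutex initialized at a given call site to one static lock class, so dynamically allocated locks that must be held simultaneously each get their own lockdep_register_key()/lockdep_set_class() pair, and the key is unregistered before its memory is freed:

/*
 * Illustrative sketch only. The lockdep_* calls are real kernel APIs
 * and compile to no-ops when lockdep is disabled.
 */
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_ctx {
	size_t nqueues;
	struct mutex *queue_locks;		/* one lock per queue */
	struct lock_class_key *lock_keys;	/* one lockdep class per lock */
};

static int demo_locks_init(struct demo_ctx *ctx, size_t nqueues)
{
	size_t i;

	ctx->queue_locks = kcalloc(nqueues, sizeof(*ctx->queue_locks),
				   GFP_KERNEL);
	ctx->lock_keys = kcalloc(nqueues, sizeof(*ctx->lock_keys),
				 GFP_KERNEL);
	if (!ctx->queue_locks || !ctx->lock_keys) {
		kfree(ctx->lock_keys);
		kfree(ctx->queue_locks);
		return -ENOMEM;
	}

	for (i = 0; i < nqueues; i++) {
		/* mutex_init() alone would place all of these mutexes in
		 * one class keyed on this call site; register a dynamic
		 * key per mutex and move the mutex into that class.
		 */
		mutex_init(&ctx->queue_locks[i]);
		lockdep_register_key(&ctx->lock_keys[i]);
		lockdep_set_class(&ctx->queue_locks[i], &ctx->lock_keys[i]);
	}
	ctx->nqueues = nqueues;
	return 0;
}

/* The "lock everything" path: with a single shared class, lockdep warns
 * about possible recursive locking on the second iteration; with
 * per-lock classes this ordered acquisition is accepted.
 */
static void demo_lock_all(struct demo_ctx *ctx)
{
	size_t i;

	for (i = 0; i < ctx->nqueues; i++)
		mutex_lock(&ctx->queue_locks[i]);
}

static void demo_unlock_all(struct demo_ctx *ctx)
{
	size_t i;

	for (i = ctx->nqueues; i > 0; i--)
		mutex_unlock(&ctx->queue_locks[i - 1]);
}

static void demo_locks_destroy(struct demo_ctx *ctx)
{
	size_t i;

	/* Unregister each dynamic key before freeing its backing memory. */
	for (i = 0; i < ctx->nqueues; i++) {
		mutex_destroy(&ctx->queue_locks[i]);
		lockdep_unregister_key(&ctx->lock_keys[i]);
	}
	kfree(ctx->lock_keys);
	kfree(ctx->queue_locks);
}

Keeping the keys in an array allocated alongside the mutexes ties their lifetimes together: both are set up in one loop and torn down in one loop, which is the same shape the patch below follows.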

Fixes: 2ca62599aa0b ("net/mlx5: HWS, added send engine and context handling")
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 .../mlx5/core/steering/hws/mlx5hws_context.h  |  1 +
 .../mlx5/core/steering/hws/mlx5hws_send.c     | 20 +++++++++++++++++--
 2 files changed, 19 insertions(+), 2 deletions(-)

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
index e5a7ce604334..8ab548aa402b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
@@ -46,6 +46,7 @@  struct mlx5hws_context {
 	struct mlx5hws_send_engine *send_queue;
 	size_t queues;
 	struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+	struct lock_class_key *bwc_lock_class_keys;
 	struct list_head tbl_list;
 	struct mlx5hws_context_debug_info debug_info;
 	struct xarray peer_ctx_xa;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
index e101dc46d99e..6d443e6ee8d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
@@ -947,8 +947,12 @@  static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
 	if (!mlx5hws_context_bwc_supported(ctx))
 		return;
 
-	for (i = 0; i < bwc_queues; i++)
+	for (i = 0; i < bwc_queues; i++) {
 		mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+		lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
+	}
+
+	kfree(ctx->bwc_lock_class_keys);
 	kfree(ctx->bwc_send_queue_locks);
 }
 
@@ -977,10 +981,22 @@  static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
 	if (!ctx->bwc_send_queue_locks)
 		return -ENOMEM;
 
-	for (i = 0; i < bwc_queues; i++)
+	ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
+					   sizeof(*ctx->bwc_lock_class_keys),
+					   GFP_KERNEL);
+	if (!ctx->bwc_lock_class_keys)
+		goto err_lock_class_keys;
+
+	for (i = 0; i < bwc_queues; i++) {
 		mutex_init(&ctx->bwc_send_queue_locks[i]);
+		lockdep_register_key(ctx->bwc_lock_class_keys + i);
+	}
 
 	return 0;
+
+err_lock_class_keys:
+	kfree(ctx->bwc_send_queue_locks);
+	return -ENOMEM;
 }
 
 int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,