
[net-next,13/15] net/mlx5e: kTLS, Use a single async context object per a callback bulk

Message ID 20221112102147.496378-14-saeed@kernel.org (mailing list archive)
State Accepted
Commit 341361533011945f0a7da85632662a5879233853
Delegated to: Netdev Maintainers
Series [net-next,01/15] net/mlx5: Bridge, Use debug instead of warn if entry doesn't exists

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Pull request is its own cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 92 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Saeed Mahameed Nov. 12, 2022, 10:21 a.m. UTC
From: Tariq Toukan <tariqt@nvidia.com>

A single async context object is sufficient to wait for the completions
of many callbacks. Switch to using one instance per bulk of commands.

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ktls_tx.c     | 50 +++++++++----------
 1 file changed, 25 insertions(+), 25 deletions(-)
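
As background, the following standalone sketch (hypothetical userspace C, not part of the patch and not the mlx5 API) illustrates the pattern the commit message describes: a single shared context object is enough to wait for the completions of a whole bulk of asynchronous callbacks, so one instance per bulk replaces one instance per command.

/*
 * Standalone illustration (hypothetical names) of one shared "async
 * context" waiting for the completions of many callbacks.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define BULK_SIZE 8

/* One shared waiter for the whole bulk. */
struct bulk_async_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int pending;	/* commands still in flight */
};

/* Per-command work item; every item points back at the shared context. */
struct async_work {
	struct bulk_async_ctx *ctx;
	int id;
};

/* "Completion callback" for one command: decrement and wake the waiter. */
static void *cmd_complete(void *arg)
{
	struct async_work *work = arg;

	printf("command %d completed\n", work->id);

	pthread_mutex_lock(&work->ctx->lock);
	if (--work->ctx->pending == 0)
		pthread_cond_signal(&work->ctx->done);
	pthread_mutex_unlock(&work->ctx->lock);
	return NULL;
}

int main(void)
{
	struct bulk_async_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
		.pending = BULK_SIZE,
	};
	struct async_work work[BULK_SIZE];
	pthread_t threads[BULK_SIZE];
	int i;

	/* Issue the whole bulk against the single shared context. */
	for (i = 0; i < BULK_SIZE; i++) {
		work[i].ctx = &ctx;
		work[i].id = i;
		pthread_create(&threads[i], NULL, cmd_complete, &work[i]);
	}

	/* One wait/cleanup point for the entire bulk. */
	pthread_mutex_lock(&ctx.lock);
	while (ctx.pending)
		pthread_cond_wait(&ctx.done, &ctx.lock);
	pthread_mutex_unlock(&ctx.lock);

	for (i = 0; i < BULK_SIZE; i++)
		pthread_join(threads[i], NULL);

	printf("all %d commands completed\n", BULK_SIZE);
	return 0;
}

In the patch below, struct mlx5e_bulk_async_ctx plays the analogous role: it holds a single struct mlx5_async_ctx at its head, each per-command struct mlx5e_async_ctx in the flexible array points back to it, and mlx5_cmd_init_async_ctx()/mlx5_cmd_cleanup_async_ctx() are now called once per bulk instead of once per command.
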

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index fcaa26847c8a..78072bf93f3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -125,7 +125,7 @@  mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
 /* struct for callback API management */
 struct mlx5e_async_ctx {
 	struct mlx5_async_work context;
-	struct mlx5_async_ctx async_ctx;
+	struct mlx5_async_ctx *async_ctx;
 	struct mlx5e_ktls_offload_context_tx *priv_tx;
 	int err;
 	union {
@@ -134,33 +134,33 @@  struct mlx5e_async_ctx {
 	};
 };
 
-static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+struct mlx5e_bulk_async_ctx {
+	struct mlx5_async_ctx async_ctx;
+	DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
+};
+
+static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
 {
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
+	int sz;
 	int i;
 
-	bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+	sz = struct_size(bulk_async, arr, n);
+	bulk_async = kvzalloc(sz, GFP_KERNEL);
 	if (!bulk_async)
 		return NULL;
 
-	for (i = 0; i < n; i++) {
-		struct mlx5e_async_ctx *async = &bulk_async[i];
+	mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);
 
-		mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
-	}
+	for (i = 0; i < n; i++)
+		bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;
 
 	return bulk_async;
 }
 
-static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
 {
-	int i;
-
-	for (i = 0; i < n; i++) {
-		struct mlx5e_async_ctx *async = &bulk_async[i];
-
-		mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
-	}
+	mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
 	kvfree(bulk_async);
 }
 
@@ -208,7 +208,7 @@  mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw
 			goto err_out;
 	} else {
 		async->priv_tx = priv_tx;
-		err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+		err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
 					       async->out_create, sizeof(async->out_create),
 					       create_tis_callback, &async->context);
 		if (err)
@@ -231,7 +231,7 @@  static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
 	}
 	async->priv_tx = priv_tx;
 	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
-				  &async->async_ctx,
+				  async->async_ctx,
 				  async->out_destroy, sizeof(async->out_destroy),
 				  destroy_tis_callback, &async->context);
 }
@@ -240,7 +240,7 @@  static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
 					   struct list_head *list, int size)
 {
 	struct mlx5e_ktls_offload_context_tx *obj, *n;
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
 	int i;
 
 	bulk_async = mlx5e_bulk_async_init(mdev, size);
@@ -249,11 +249,11 @@  static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
 
 	i = 0;
 	list_for_each_entry_safe(obj, n, list, list_node) {
-		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
 		i++;
 	}
 
-	mlx5e_bulk_async_cleanup(bulk_async, size);
+	mlx5e_bulk_async_cleanup(bulk_async);
 }
 
 /* Recycling pool API */
@@ -279,7 +279,7 @@  static void create_work(struct work_struct *work)
 	struct mlx5e_tls_tx_pool *pool =
 		container_of(work, struct mlx5e_tls_tx_pool, create_work);
 	struct mlx5e_ktls_offload_context_tx *obj;
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
 	LIST_HEAD(local_list);
 	int i, j, err = 0;
 
@@ -288,7 +288,7 @@  static void create_work(struct work_struct *work)
 		return;
 
 	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
-		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -297,13 +297,13 @@  static void create_work(struct work_struct *work)
 	}
 
 	for (j = 0; j < i; j++) {
-		struct mlx5e_async_ctx *async = &bulk_async[j];
+		struct mlx5e_async_ctx *async = &bulk_async->arr[j];
 
 		if (!err && async->err)
 			err = async->err;
 	}
 	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
-	mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+	mlx5e_bulk_async_cleanup(bulk_async);
 	if (err)
 		goto err_out;