diff mbox series

[net-next,06/13] net/mlx5: fs, add HWS modify header API function

Message ID 20250107060708.1610882-7-tariqt@nvidia.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series mlx5 HW-Managed Flow Steering in FS core level | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 1 maintainers not CCed: linux-rdma@vger.kernel.org
netdev/build_clang success Errors and warnings before: 73 this patch: 73
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2 this patch: 2
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Tariq Toukan Jan. 7, 2025, 6:07 a.m. UTC
From: Moshe Shemesh <moshe@nvidia.com>

Add modify header alloc and dealloc API functions to provide modify
header actions for steering rules. Use fs hws pools to get actions from
shared bulks of modify header actions.

Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/fs_core.h |   1 +
 .../mellanox/mlx5/core/steering/hws/fs_hws.c  | 117 +++++++++++++
 .../mellanox/mlx5/core/steering/hws/fs_hws.h  |   2 +
 .../mlx5/core/steering/hws/fs_hws_pools.c     | 164 ++++++++++++++++++
 .../mlx5/core/steering/hws/fs_hws_pools.h     |  22 +++
 5 files changed, 306 insertions(+)

Comments

Przemek Kitszel Jan. 7, 2025, 12:09 p.m. UTC | #1
On 1/7/25 07:07, Tariq Toukan wrote:
> From: Moshe Shemesh <moshe@nvidia.com>
> 
> Add modify header alloc and dealloc API functions to provide modify
> header actions for steering rules. Use fs hws pools to get actions from
> shared bulks of modify header actions.
> 
> Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
> Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
> Reviewed-by: Mark Bloch <mbloch@nvidia.com>
> Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
> ---
>   .../net/ethernet/mellanox/mlx5/core/fs_core.h |   1 +
>   .../mellanox/mlx5/core/steering/hws/fs_hws.c  | 117 +++++++++++++
>   .../mellanox/mlx5/core/steering/hws/fs_hws.h  |   2 +
>   .../mlx5/core/steering/hws/fs_hws_pools.c     | 164 ++++++++++++++++++
>   .../mlx5/core/steering/hws/fs_hws_pools.h     |  22 +++
>   5 files changed, 306 insertions(+)
> 
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
> index 9b0575a61362..06ec48f51b6d 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
> @@ -65,6 +65,7 @@ struct mlx5_modify_hdr {
>   	enum mlx5_flow_resource_owner owner;
>   	union {
>   		struct mlx5_fs_dr_action fs_dr_action;
> +		struct mlx5_fs_hws_action fs_hws_action;
>   		u32 id;
>   	};
>   };
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
> index 723865140b2e..a75e5ce168c7 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
> @@ -14,6 +14,8 @@ static struct mlx5hws_action *
>   create_action_remove_header_vlan(struct mlx5hws_context *ctx);
>   static void destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
>   			    unsigned long index);
> +static void destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
> +			    unsigned long index);

usual "please add your prefix" complaint

sorry for mostly nitpicks, I will take deeper look later

>   
>   static int init_hws_actions_pool(struct mlx5_core_dev *dev,
>   				 struct mlx5_fs_hws_context *fs_ctx)
> @@ -56,6 +58,7 @@ static int init_hws_actions_pool(struct mlx5_core_dev *dev,
>   		goto cleanup_insert_hdr;
>   	xa_init(&hws_pool->el2tol3tnl_pools);
>   	xa_init(&hws_pool->el2tol2tnl_pools);
> +	xa_init(&hws_pool->mh_pools);
>   	return 0;
>   
>   cleanup_insert_hdr:
> @@ -81,6 +84,9 @@ static void cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
>   	struct mlx5_fs_pool *pool;
>   	unsigned long i;
>   
> +	xa_for_each(&hws_pool->mh_pools, i, pool)
> +		destroy_mh_pool(pool, &hws_pool->mh_pools, i);
> +	xa_destroy(&hws_pool->mh_pools);
>   	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
>   		destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
>   	xa_destroy(&hws_pool->el2tol2tnl_pools);
> @@ -528,6 +534,115 @@ static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace
>   	pkt_reformat->fs_hws_action.pr_data = NULL;
>   }
>   
> +static struct mlx5_fs_pool *
> +create_mh_pool(struct mlx5_core_dev *dev,

ditto prefix

[...]

> +static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
> +					    u8 namespace, u8 num_actions,
> +					    void *modify_actions,
> +					    struct mlx5_modify_hdr *modify_hdr)
> +{
> +	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
> +	struct mlx5hws_action_mh_pattern pattern = {};
> +	struct mlx5_fs_hws_mh *mh_data = NULL;
> +	struct mlx5hws_action *hws_action;
> +	struct mlx5_fs_pool *pool;
> +	unsigned long i, cnt = 0;
> +	bool known_pattern;
> +	int err;
> +
> +	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
> +	pattern.data = modify_actions;
> +
> +	known_pattern = false;
> +	xa_for_each(&hws_pool->mh_pools, i, pool) {
> +		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
> +			known_pattern = true;
> +			break;
> +		}
> +		cnt++;
> +	}
> +
> +	if (!known_pattern) {
> +		pool = create_mh_pool(ns->dev, &pattern, &hws_pool->mh_pools, cnt);
> +		if (IS_ERR(pool))
> +			return PTR_ERR(pool);
> +	}

if, by any chance, .mh_pools were empty, the loop would run zero
iterations and the next line would use @pool uninitialized

> +	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
> +	if (IS_ERR(mh_data)) {
> +		err = PTR_ERR(mh_data);
> +		goto destroy_pool;
> +	}
> +	hws_action = mh_data->bulk->hws_action;
> +	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
> +	if (!mh_data->data) {
> +		err = -ENOMEM;
> +		goto release_mh;
> +	}
> +	modify_hdr->fs_hws_action.mh_data = mh_data;
> +	modify_hdr->fs_hws_action.fs_pool = pool;
> +	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
> +	modify_hdr->fs_hws_action.hws_action = hws_action;
> +
> +	return 0;
> +
> +release_mh:
> +	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
> +destroy_pool:
> +	if (!known_pattern)
> +		destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
> +	return err;
> +}

[...]

> +static struct mlx5_fs_bulk *
> +mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
> +{
> +	struct mlx5hws_action_mh_pattern *pattern;
> +	struct mlx5_flow_root_namespace *root_ns;
> +	struct mlx5_fs_hws_mh_bulk *mh_bulk;
> +	struct mlx5hws_context *ctx;
> +	int bulk_len;
> +	int i;

meld @i to prev line, or better declare within the for loop

> +
> +	root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
> +	if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
> +		return NULL;
> +
> +	ctx = root_ns->fs_hws_context.hws_ctx;
> +	if (!ctx)
> +		return NULL;
> +
> +	if (!pool_ctx)
> +		return NULL;

you could combine the two checks above

[...]

> +bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
> +			       struct mlx5hws_action_mh_pattern *pattern)
> +{
> +	struct mlx5hws_action_mh_pattern *pool_pattern;
> +	int num_actions, i;
> +
> +	pool_pattern = mh_pool->pool_ctx;
> +	if (WARN_ON_ONCE(!pool_pattern))
> +		return false;
> +
> +	if (pattern->sz != pool_pattern->sz)
> +		return false;
> +	num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
> +	for (i = 0; i < num_actions; i++)

missing braces

> +		if ((__force __be32)pattern->data[i] !=
> +		    (__force __be32)pool_pattern->data[i])
> +			return false;
> +	return true;
> +}
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 9b0575a61362..06ec48f51b6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -65,6 +65,7 @@  struct mlx5_modify_hdr {
 	enum mlx5_flow_resource_owner owner;
 	union {
 		struct mlx5_fs_dr_action fs_dr_action;
+		struct mlx5_fs_hws_action fs_hws_action;
 		u32 id;
 	};
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 723865140b2e..a75e5ce168c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -14,6 +14,8 @@  static struct mlx5hws_action *
 create_action_remove_header_vlan(struct mlx5hws_context *ctx);
 static void destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
 			    unsigned long index);
+static void destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+			    unsigned long index);
 
 static int init_hws_actions_pool(struct mlx5_core_dev *dev,
 				 struct mlx5_fs_hws_context *fs_ctx)
@@ -56,6 +58,7 @@  static int init_hws_actions_pool(struct mlx5_core_dev *dev,
 		goto cleanup_insert_hdr;
 	xa_init(&hws_pool->el2tol3tnl_pools);
 	xa_init(&hws_pool->el2tol2tnl_pools);
+	xa_init(&hws_pool->mh_pools);
 	return 0;
 
 cleanup_insert_hdr:
@@ -81,6 +84,9 @@  static void cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
 	struct mlx5_fs_pool *pool;
 	unsigned long i;
 
+	xa_for_each(&hws_pool->mh_pools, i, pool)
+		destroy_mh_pool(pool, &hws_pool->mh_pools, i);
+	xa_destroy(&hws_pool->mh_pools);
 	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
 		destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
 	xa_destroy(&hws_pool->el2tol2tnl_pools);
@@ -528,6 +534,115 @@  static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace
 	pkt_reformat->fs_hws_action.pr_data = NULL;
 }
 
+static struct mlx5_fs_pool *
+create_mh_pool(struct mlx5_core_dev *dev,
+	       struct mlx5hws_action_mh_pattern *pattern,
+	       struct xarray *mh_pools, unsigned long index)
+{
+	struct mlx5_fs_pool *pool;
+	int err;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+	err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
+	if (err)
+		goto free_pool;
+	err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
+	if (err)
+		goto cleanup_pool;
+	return pool;
+
+cleanup_pool:
+	mlx5_fs_hws_mh_pool_cleanup(pool);
+free_pool:
+	kfree(pool);
+	return ERR_PTR(err);
+}
+
+static void destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+			    unsigned long index)
+{
+	xa_erase(mh_pools, index);
+	mlx5_fs_hws_mh_pool_cleanup(pool);
+	kfree(pool);
+}
+
+static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+					    u8 namespace, u8 num_actions,
+					    void *modify_actions,
+					    struct mlx5_modify_hdr *modify_hdr)
+{
+	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+	struct mlx5hws_action_mh_pattern pattern = {};
+	struct mlx5_fs_hws_mh *mh_data = NULL;
+	struct mlx5hws_action *hws_action;
+	struct mlx5_fs_pool *pool;
+	unsigned long i, cnt = 0;
+	bool known_pattern;
+	int err;
+
+	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+	pattern.data = modify_actions;
+
+	known_pattern = false;
+	xa_for_each(&hws_pool->mh_pools, i, pool) {
+		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
+			known_pattern = true;
+			break;
+		}
+		cnt++;
+	}
+
+	if (!known_pattern) {
+		pool = create_mh_pool(ns->dev, &pattern, &hws_pool->mh_pools, cnt);
+		if (IS_ERR(pool))
+			return PTR_ERR(pool);
+	}
+	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
+	if (IS_ERR(mh_data)) {
+		err = PTR_ERR(mh_data);
+		goto destroy_pool;
+	}
+	hws_action = mh_data->bulk->hws_action;
+	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
+	if (!mh_data->data) {
+		err = -ENOMEM;
+		goto release_mh;
+	}
+	modify_hdr->fs_hws_action.mh_data = mh_data;
+	modify_hdr->fs_hws_action.fs_pool = pool;
+	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+	modify_hdr->fs_hws_action.hws_action = hws_action;
+
+	return 0;
+
+release_mh:
+	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+destroy_pool:
+	if (!known_pattern)
+		destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
+	return err;
+}
+
+static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+					       struct mlx5_modify_hdr *modify_hdr)
+{
+	struct mlx5_fs_hws_mh *mh_data;
+	struct mlx5_fs_pool *pool;
+
+	if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
+		mlx5_core_err(ns->dev, "Failed release modify-header\n");
+		return;
+	}
+
+	mh_data = modify_hdr->fs_hws_action.mh_data;
+	kfree(mh_data->data);
+	pool = modify_hdr->fs_hws_action.fs_pool;
+	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+	modify_hdr->fs_hws_action.mh_data = NULL;
+}
+
 static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
 	.create_flow_table = mlx5_cmd_hws_create_flow_table,
 	.destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
@@ -537,6 +652,8 @@  static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
 	.destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
 	.packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
 	.packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
+	.modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
+	.modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
 	.create_ns = mlx5_cmd_hws_create_ns,
 	.destroy_ns = mlx5_cmd_hws_destroy_ns,
 	.set_peer = mlx5_cmd_hws_set_peer,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
index 2292eb08ef24..db2d53fbf9d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -18,6 +18,7 @@  struct mlx5_fs_hws_actions_pool {
 	struct mlx5_fs_pool dl3tnltol2_pool;
 	struct xarray el2tol3tnl_pools;
 	struct xarray el2tol2tnl_pools;
+	struct xarray mh_pools;
 };
 
 struct mlx5_fs_hws_context {
@@ -34,6 +35,7 @@  struct mlx5_fs_hws_action {
 	struct mlx5hws_action *hws_action;
 	struct mlx5_fs_pool *fs_pool;
 	struct mlx5_fs_hws_pr *pr_data;
+	struct mlx5_fs_hws_mh *mh_data;
 };
 
 struct mlx5_fs_hws_matcher {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index 14f732f3f09c..60dc0aaccbba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -236,3 +236,167 @@  struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
 {
 	return pr_data->bulk->hws_action;
 }
+
+static struct mlx5hws_action *
+mh_bulk_action_create(struct mlx5hws_context *ctx,
+		      struct mlx5hws_action_mh_pattern *pattern)
+{
+	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+	u32 log_bulk_size;
+
+	log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+	return mlx5hws_action_create_modify_header(ctx, 1, pattern,
+						   log_bulk_size, flags);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+	struct mlx5hws_action_mh_pattern *pattern;
+	struct mlx5_flow_root_namespace *root_ns;
+	struct mlx5_fs_hws_mh_bulk *mh_bulk;
+	struct mlx5hws_context *ctx;
+	int bulk_len;
+	int i;
+
+	root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+		return NULL;
+
+	ctx = root_ns->fs_hws_context.hws_ctx;
+	if (!ctx)
+		return NULL;
+
+	if (!pool_ctx)
+		return NULL;
+	pattern = pool_ctx;
+	bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+	mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
+	if (!mh_bulk)
+		return NULL;
+
+	if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+		goto free_mh_bulk;
+
+	for (i = 0; i < bulk_len; i++) {
+		mh_bulk->mhs_data[i].bulk = mh_bulk;
+		mh_bulk->mhs_data[i].offset = i;
+	}
+
+	mh_bulk->hws_action = mh_bulk_action_create(ctx, pattern);
+	if (!mh_bulk->hws_action)
+		goto cleanup_fs_bulk;
+
+	return &mh_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+	mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
+free_mh_bulk:
+	kvfree(mh_bulk);
+	return NULL;
+}
+
+static int
+mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
+			    struct mlx5_fs_bulk *fs_bulk)
+{
+	struct mlx5_fs_hws_mh_bulk *mh_bulk;
+
+	mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
+	if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+		mlx5_core_err(dev, "Freeing bulk before all modify header were released\n");
+		return -EBUSY;
+	}
+
+	mlx5hws_action_destroy(mh_bulk->hws_action);
+	mlx5_fs_bulk_cleanup(fs_bulk);
+	kvfree(mh_bulk);
+
+	return 0;
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
+	.bulk_create = mlx5_fs_hws_mh_bulk_create,
+	.bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
+	.update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+			     struct mlx5_core_dev *dev,
+			     struct mlx5hws_action_mh_pattern *pattern)
+{
+	struct mlx5hws_action_mh_pattern *pool_pattern;
+
+	pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
+	if (!pool_pattern)
+		return -ENOMEM;
+	pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
+	if (!pool_pattern->data) {
+		kfree(pool_pattern);
+		return -ENOMEM;
+	}
+	pool_pattern->sz = pattern->sz;
+	mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
+			  pool_pattern);
+	return 0;
+}
+
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
+{
+	struct mlx5hws_action_mh_pattern *pool_pattern;
+
+	mlx5_fs_pool_cleanup(fs_hws_mh_pool);
+	pool_pattern = fs_hws_mh_pool->pool_ctx;
+	if (!pool_pattern)
+		return;
+	kfree(pool_pattern->data);
+	kfree(pool_pattern);
+}
+
+struct mlx5_fs_hws_mh *
+mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
+{
+	struct mlx5_fs_pool_index pool_index = {};
+	struct mlx5_fs_hws_mh_bulk *mh_bulk;
+	int err;
+
+	err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
+	if (err)
+		return ERR_PTR(err);
+	mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
+			       fs_bulk);
+	return &mh_bulk->mhs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+				    struct mlx5_fs_hws_mh *mh_data)
+{
+	struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
+	struct mlx5_fs_pool_index pool_index = {};
+	struct mlx5_core_dev *dev = mh_pool->dev;
+
+	pool_index.fs_bulk = fs_bulk;
+	pool_index.index = mh_data->offset;
+	if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
+		mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
+}
+
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+			       struct mlx5hws_action_mh_pattern *pattern)
+{
+	struct mlx5hws_action_mh_pattern *pool_pattern;
+	int num_actions, i;
+
+	pool_pattern = mh_pool->pool_ctx;
+	if (WARN_ON_ONCE(!pool_pattern))
+		return false;
+
+	if (pattern->sz != pool_pattern->sz)
+		return false;
+	num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
+	for (i = 0; i < num_actions; i++)
+		if ((__force __be32)pattern->data[i] !=
+		    (__force __be32)pool_pattern->data[i])
+			return false;
+	return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
index 93ec5b3b76fe..eda17031aef0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -36,6 +36,19 @@  struct mlx5_fs_hws_pr_pool_ctx {
 	size_t encap_data_size;
 };
 
+struct mlx5_fs_hws_mh {
+	struct mlx5_fs_hws_mh_bulk *bulk;
+	u32 offset;
+	u8 *data;
+};
+
+struct mlx5_fs_hws_mh_bulk {
+	struct mlx5_fs_bulk fs_bulk;
+	struct mlx5_fs_pool *mh_pool;
+	struct mlx5hws_action *hws_action;
+	struct mlx5_fs_hws_mh mhs_data[];
+};
+
 int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
 			     struct mlx5_core_dev *dev, size_t encap_data_size,
 			     enum mlx5hws_action_type reformat_type);
@@ -45,4 +58,13 @@  struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_po
 void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
 				    struct mlx5_fs_hws_pr *pr_data);
 struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data);
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+			     struct mlx5_core_dev *dev,
+			     struct mlx5hws_action_mh_pattern *pattern);
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool);
+struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool);
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+				    struct mlx5_fs_hws_mh *mh_data);
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+			       struct mlx5hws_action_mh_pattern *pattern);
 #endif /* __MLX5_FS_HWS_POOLS_H__ */