[net-next,12/15] net/mlx5: HWS, added backward-compatible API handling

Message ID 20240903031948.78006-13-saeed@kernel.org (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series [net-next,01/15] net/mlx5: Added missing mlx5_ifc definition for HW Steering

Checks

Context Check Description
netdev/series_format success Pull request is its own cover letter
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 16 this patch: 16
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 4 of 4 maintainers
netdev/build_clang success Errors and warnings before: 17 this patch: 17
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 22 this patch: 22
netdev/checkpatch warning WARNING: Block comments should align the * on each line WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: line length of 103 exceeds 80 columns WARNING: line length of 106 exceeds 80 columns WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns WARNING: line length of 97 exceeds 80 columns WARNING: line length of 98 exceeds 80 columns WARNING: line length of 99 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Saeed Mahameed Sept. 3, 2024, 3:19 a.m. UTC
From: Yevgeny Kliteynik <kliteyn@nvidia.com>

Added implementation of backward-compatible (BWC) steering API.
Native HWS API is very different from SWS API:
 - SWS is synchronous (rule creation/deletion API call returns when
   the rule is created/deleted), while HWS is asynchronous (it
   requires polling for completion in order to know when the rule
   creation/deletion happened)
 - SWS manages its own memory (it allocates/frees all the needed
   memory for steering rules), while HWS requires the rules' memory
   to be allocated/freed outside the API

In order to make HWS fit the existing fs-core steering API paradigm,
this patch adds an implementation of a backward-compatible (BWC)
steering API that behaves similarly to SWS: among other things, it
encompasses all the rules' memory management and completion polling,
presenting the usual synchronous API to the upper layer.
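
To illustrate, here is a minimal sketch (not part of this patch) of
how an upper layer could drive the BWC API added below; the table,
mask, params and rule actions are assumed to already exist, and error
handling is abbreviated:

  struct mlx5hws_bwc_matcher *bwc_matcher;
  struct mlx5hws_bwc_rule *bwc_rule;

  bwc_matcher = mlx5hws_bwc_matcher_create(table, priority,
                                           match_criteria_enable, mask);
  if (!bwc_matcher)
          return -EINVAL;

  /* Synchronous: returns only after HW completion; rule memory is
   * allocated and freed inside the BWC layer.
   */
  bwc_rule = mlx5hws_bwc_rule_create(bwc_matcher, &params,
                                     flow_source, rule_actions);
  if (!bwc_rule)
          goto err_matcher;

  /* ... */

  mlx5hws_bwc_rule_destroy(bwc_rule);
  err_matcher:
  mlx5hws_bwc_matcher_destroy(bwc_matcher);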

A user that wishes to utilize the full speed potential of HWS can
call the native async HWS API and benefit from rule insertion/deletion
batching, lower memory management overhead, and lower CPU utilization.
This approach will be taken by the future Connection Tracking
implementation.
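
For reference, the native asynchronous flow looks roughly like the
following sketch, based on the HWS calls used internally below;
ctx, matcher, attr, at_idx, BURST, n_rules, match_params, rules and
pending are placeholders assumed to be set up, and error handling is
omitted:

  struct mlx5hws_flow_op_result comp[BURST];
  int i, ret;

  for (i = 0; i < n_rules; i++) {
          /* burst == 0 rings the doorbell - do it on batch end only */
          attr.burst = !!((i + 1) % BURST);
          mlx5hws_rule_create(matcher, 0, match_params[i], at_idx,
                              rule_actions, &attr, rules[i]);
  }

  /* completions arrive asynchronously - poll until all are done */
  while (pending) {
          ret = mlx5hws_send_queue_poll(ctx, attr.queue_id, comp, BURST);
          if (ret > 0)
                  pending -= ret;
  }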

Note that BWC steering doesn't yet support rules that require more
than one match STE (complex rules).
This support will be added later on.

Reviewed-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 .../mlx5/core/steering/hws/mlx5hws_bwc.c      | 994 ++++++++++++++++++
 .../mlx5/core/steering/hws/mlx5hws_bwc.h      |  73 ++
 .../core/steering/hws/mlx5hws_bwc_complex.c   |  86 ++
 .../core/steering/hws/mlx5hws_bwc_complex.h   |  29 +
 4 files changed, 1182 insertions(+)
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
new file mode 100644
index 000000000000..b4000601edf6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
@@ -0,0 +1,994 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
+{
+	/* assign random queue */
+	return get_random_u8() % mlx5hws_bwc_queues(ctx);
+}
+
+static u16
+hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
+{
+	return min(ctx->send_queue[queue_id].num_entries / 2,
+		   MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
+}
+
+static struct mutex *
+hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
+{
+	return &ctx->bwc_send_queue_locks[idx];
+}
+
+static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
+{
+	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+	struct mutex *queue_lock; /* Protect the queue */
+	int i;
+
+	for (i = 0; i < bwc_queues; i++) {
+		queue_lock = hws_bwc_get_queue_lock(ctx, i);
+		mutex_lock(queue_lock);
+	}
+}
+
+static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
+{
+	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+	struct mutex *queue_lock; /* Protect the queue */
+	int i = bwc_queues;
+
+	while (i--) {
+		queue_lock = hws_bwc_get_queue_lock(ctx, i);
+		mutex_unlock(queue_lock);
+	}
+}
+
+static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+				      u32 priority,
+				      u8 size_log)
+{
+	memset(attr, 0, sizeof(*attr));
+
+	attr->priority = priority;
+	attr->optimize_using_rule_idx = 0;
+	attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
+	attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
+	attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
+	attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
+	attr->rule.num_log = size_log;
+	attr->resizable = true;
+	attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+}
+
+int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+				      struct mlx5hws_table *table,
+				      u32 priority,
+				      u8 match_criteria_enable,
+				      struct mlx5hws_match_parameters *mask,
+				      enum mlx5hws_action_type action_types[])
+{
+	enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
+	struct mlx5hws_context *ctx = table->ctx;
+	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+	struct mlx5hws_matcher_attr attr = {0};
+	int i;
+
+	bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
+	if (!bwc_matcher->rules)
+		goto err;
+
+	for (i = 0; i < bwc_queues; i++)
+		INIT_LIST_HEAD(&bwc_matcher->rules[i]);
+
+	hws_bwc_matcher_init_attr(&attr,
+				  priority,
+				  MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+
+	bwc_matcher->priority = priority;
+	bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+	/* create dummy action template */
+	bwc_matcher->at[0] =
+		mlx5hws_action_template_create(action_types ?
+					       action_types : init_action_types);
+	if (!bwc_matcher->at[0]) {
+		mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
+		goto free_bwc_matcher_rules;
+	}
+
+	bwc_matcher->num_of_at = 1;
+
+	bwc_matcher->mt = mlx5hws_match_template_create(ctx,
+							mask->match_buf,
+							mask->match_sz,
+							match_criteria_enable);
+	if (!bwc_matcher->mt) {
+		mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
+		goto free_at;
+	}
+
+	bwc_matcher->matcher = mlx5hws_matcher_create(table,
+						      &bwc_matcher->mt, 1,
+						      &bwc_matcher->at[0],
+						      bwc_matcher->num_of_at,
+						      &attr);
+	if (!bwc_matcher->matcher) {
+		mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
+		goto free_mt;
+	}
+
+	return 0;
+
+free_mt:
+	mlx5hws_match_template_destroy(bwc_matcher->mt);
+free_at:
+	mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_rules:
+	kfree(bwc_matcher->rules);
+err:
+	return -EINVAL;
+}
+
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+			   u32 priority,
+			   u8 match_criteria_enable,
+			   struct mlx5hws_match_parameters *mask)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher;
+	bool is_complex;
+	int ret;
+
+	if (!mlx5hws_context_bwc_supported(table->ctx)) {
+		mlx5hws_err(table->ctx,
+			    "BWC matcher: context created w/o BWC API compatibility\n");
+		return NULL;
+	}
+
+	bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+	if (!bwc_matcher)
+		return NULL;
+
+	/* Check if all the required match params can be matched
+	 * in a single STE; otherwise a complex matcher is needed.
+	 */
+
+	is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
+	if (is_complex)
+		ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
+							 table,
+							 priority,
+							 match_criteria_enable,
+							 mask);
+	else
+		ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+							table,
+							priority,
+							match_criteria_enable,
+							mask,
+							NULL);
+	if (ret)
+		goto free_bwc_matcher;
+
+	return bwc_matcher;
+
+free_bwc_matcher:
+	kfree(bwc_matcher);
+
+	return NULL;
+}
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	int i;
+
+	mlx5hws_matcher_destroy(bwc_matcher->matcher);
+	bwc_matcher->matcher = NULL;
+
+	for (i = 0; i < bwc_matcher->num_of_at; i++)
+		mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+
+	mlx5hws_match_template_destroy(bwc_matcher->mt);
+	kfree(bwc_matcher->rules);
+
+	return 0;
+}
+
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	if (bwc_matcher->num_of_rules)
+		mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+			    "BWC matcher destroy: matcher still has %d rules\n",
+			    bwc_matcher->num_of_rules);
+
+	mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+
+	kfree(bwc_matcher);
+	return 0;
+}
+
+static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+			      u16 queue_id,
+			      u32 *pending_rules,
+			      bool drain)
+{
+	struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
+	u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
+	bool got_comp = *pending_rules >= burst_th;
+	bool queue_full;
+	int err = 0;
+	int ret;
+	int i;
+
+	/* Check if there are any completions at all */
+	if (!got_comp && !drain)
+		return 0;
+
+	queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
+	while (queue_full || ((got_comp || drain) && *pending_rules)) {
+		ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
+		if (unlikely(ret < 0)) {
+			mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
+				    queue_id, ret);
+			return -EINVAL;
+		}
+
+		if (ret) {
+			(*pending_rules) -= ret;
+			for (i = 0; i < ret; i++) {
+				if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
+					mlx5hws_err(ctx,
+						    "BWC poll error: polling queue %d returned completion with error\n",
+						    queue_id);
+					err = -EINVAL;
+				}
+			}
+			queue_full = false;
+		}
+
+		got_comp = !!ret;
+	}
+
+	return err;
+}
+
+void
+mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+			   u16 bwc_queue_idx,
+			   u32 flow_source,
+			   struct mlx5hws_rule_attr *rule_attr)
+{
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+	/* no use of INSERT_BY_INDEX in bwc rule */
+	rule_attr->rule_idx = 0;
+
+	/* notify HW at each rule insertion/deletion */
+	rule_attr->burst = 0;
+
+	/* We don't need user data, but the API requires it to exist */
+	rule_attr->user_data = (void *)0xFACADE;
+
+	rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
+	rule_attr->flow_source = flow_source;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	struct mlx5hws_bwc_rule *bwc_rule;
+
+	bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
+	if (unlikely(!bwc_rule))
+		goto out_err;
+
+	bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
+	if (unlikely(!bwc_rule->rule))
+		goto free_rule;
+
+	bwc_rule->bwc_matcher = bwc_matcher;
+	return bwc_rule;
+
+free_rule:
+	kfree(bwc_rule);
+out_err:
+	return NULL;
+}
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	/* kfree() is NULL-safe, no need to check the pointer */
+	kfree(bwc_rule->rule);
+	kfree(bwc_rule);
+}
+
+static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+	bwc_matcher->num_of_rules++;
+	bwc_rule->bwc_queue_idx = idx;
+	list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+}
+
+static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+	bwc_matcher->num_of_rules--;
+	list_del_init(&bwc_rule->list_node);
+}
+
+static int
+hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
+			       struct mlx5hws_rule_attr *attr)
+{
+	return mlx5hws_rule_destroy(bwc_rule->rule, attr);
+}
+
+static int
+hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
+			      struct mlx5hws_rule_attr *rule_attr)
+{
+	struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_flow_op_result completion;
+	int ret;
+
+	ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
+	if (unlikely(ret))
+		return ret;
+
+	do {
+		ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
+	} while (ret != 1);
+
+	if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
+		     (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+		      bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
+		mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
+			    completion.status, bwc_rule->rule->status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	u16 idx = bwc_rule->bwc_queue_idx;
+	struct mlx5hws_rule_attr attr;
+	struct mutex *queue_lock; /* Protect the queue */
+	int ret;
+
+	mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);
+
+	queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+	mutex_lock(queue_lock);
+
+	ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+	hws_bwc_rule_list_remove(bwc_rule);
+
+	mutex_unlock(queue_lock);
+
+	return ret;
+}
+
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	int ret;
+
+	ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+
+	mlx5hws_bwc_rule_free(bwc_rule);
+	return ret;
+}
+
+static int
+hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
+			  u32 *match_param,
+			  u8 at_idx,
+			  struct mlx5hws_rule_action rule_actions[],
+			  struct mlx5hws_rule_attr *rule_attr)
+{
+	return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
+				   0, /* only one match template supported */
+				   match_param,
+				   at_idx,
+				   rule_actions,
+				   rule_attr,
+				   bwc_rule->rule);
+}
+
+static int
+hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
+			 u32 *match_param,
+			 u8 at_idx,
+			 struct mlx5hws_rule_action rule_actions[],
+			 struct mlx5hws_rule_attr *rule_attr)
+
+{
+	struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+	u32 expected_completions = 1;
+	int ret;
+
+	ret = hws_bwc_rule_create_async(bwc_rule, match_param,
+					at_idx, rule_actions,
+					rule_attr);
+	if (unlikely(ret))
+		return ret;
+
+	ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+
+	return ret;
+}
+
+static int
+hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
+			 u8 at_idx,
+			 struct mlx5hws_rule_action rule_actions[],
+			 struct mlx5hws_rule_attr *rule_attr)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	u32 expected_completions = 1;
+	int ret;
+
+	ret = mlx5hws_rule_action_update(bwc_rule->rule,
+					 at_idx,
+					 rule_actions,
+					 rule_attr);
+	if (unlikely(ret))
+		return ret;
+
+	ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+	if (unlikely(ret))
+		mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
+
+	return ret;
+}
+
+static bool
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
+
+	return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
+	       caps->ste_alloc_log_max - 1;
+}
+
+static bool
+hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+				   u32 num_of_rules)
+{
+	if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+		return false;
+
+	if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
+		     (1UL << bwc_matcher->size_log)))
+		return true;
+
+	return false;
+}
+
+static void
+hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
+				     enum mlx5hws_action_type action_types[])
+{
+	int i;
+
+	for (i = 0;
+	     rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
+	     i++) {
+		action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
+	}
+
+	action_types[i] = MLX5HWS_ACTION_TYP_LAST;
+}
+
+static int
+hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+			  struct mlx5hws_rule_action rule_actions[])
+{
+	enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+
+	hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
+
+	bwc_matcher->at[bwc_matcher->num_of_at] =
+		mlx5hws_action_template_create(action_types);
+
+	if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
+		return -ENOMEM;
+
+	bwc_matcher->num_of_at++;
+	return 0;
+}
+
+static int
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+	if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+		mlx5hws_err(ctx, "Can't resize matcher: size exceeds limit %d\n",
+			    caps->ste_alloc_log_max);
+		return -ENOMEM;
+	}
+
+	bwc_matcher->size_log =
+		min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+		    caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+
+	return 0;
+}
+
+static int
+hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+			struct mlx5hws_rule_action rule_actions[])
+{
+	enum mlx5hws_action_type *action_type_arr;
+	int i, j;
+
+	/* start from index 1 - first action template is a dummy */
+	for (i = 1; i < bwc_matcher->num_of_at; i++) {
+		j = 0;
+		action_type_arr = bwc_matcher->at[i]->action_type_arr;
+
+		while (rule_actions[j].action &&
+		       rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
+			if (action_type_arr[j] != rule_actions[j].action->type)
+				break;
+			j++;
+		}
+
+		if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
+		    (!rule_actions[j].action ||
+		     rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
+			return i;
+	}
+
+	return -1;
+}
+
+static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+	struct mlx5hws_bwc_rule **bwc_rules;
+	struct mlx5hws_rule_attr rule_attr;
+	u32 *pending_rules;
+	int i, j, ret = 0;
+	bool all_done;
+	u16 burst_th;
+
+	mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+	pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
+	if (!pending_rules)
+		return -ENOMEM;
+
+	bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
+	if (!bwc_rules) {
+		ret = -ENOMEM;
+		goto free_pending_rules;
+	}
+
+	for (i = 0; i < bwc_queues; i++) {
+		if (list_empty(&bwc_matcher->rules[i]))
+			bwc_rules[i] = NULL;
+		else
+			bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
+							struct mlx5hws_bwc_rule,
+							list_node);
+	}
+
+	do {
+		all_done = true;
+
+		for (i = 0; i < bwc_queues; i++) {
+			rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+			burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+
+			for (j = 0; j < burst_th && bwc_rules[i]; j++) {
+				rule_attr.burst = !!((j + 1) % burst_th);
+				ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
+								       bwc_rules[i]->rule,
+								       &rule_attr);
+				if (unlikely(ret)) {
+					mlx5hws_err(ctx,
+						    "Moving BWC rule failed during rehash (%d)\n",
+						    ret);
+					goto free_bwc_rules;
+				}
+
+				all_done = false;
+				pending_rules[i]++;
+				bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
+							    &bwc_matcher->rules[i]) ?
+					       NULL : list_next_entry(bwc_rules[i], list_node);
+
+				ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
+							 &pending_rules[i], false);
+				if (unlikely(ret))
+					goto free_bwc_rules;
+			}
+		}
+	} while (!all_done);
+
+	/* drain all the bwc queues */
+	for (i = 0; i < bwc_queues; i++) {
+		if (pending_rules[i]) {
+			u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+			mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
+			ret = hws_bwc_queue_poll(ctx, queue_id,
+						 &pending_rules[i], true);
+			if (unlikely(ret))
+				goto free_bwc_rules;
+		}
+	}
+
+free_bwc_rules:
+	kfree(bwc_rules);
+free_pending_rules:
+	kfree(pending_rules);
+
+	return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	return hws_bwc_matcher_move_all_simple(bwc_matcher);
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_matcher_attr matcher_attr = {0};
+	struct mlx5hws_matcher *old_matcher;
+	struct mlx5hws_matcher *new_matcher;
+	int ret;
+
+	hws_bwc_matcher_init_attr(&matcher_attr,
+				  bwc_matcher->priority,
+				  bwc_matcher->size_log);
+
+	old_matcher = bwc_matcher->matcher;
+	new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+					     &bwc_matcher->mt, 1,
+					     bwc_matcher->at,
+					     bwc_matcher->num_of_at,
+					     &matcher_attr);
+	if (!new_matcher) {
+		mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+		return -ENOMEM;
+	}
+
+	ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+	if (ret) {
+		mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+		return ret;
+	}
+
+	ret = hws_bwc_matcher_move_all(bwc_matcher);
+	if (ret) {
+		mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
+		return -ENOMEM;
+	}
+
+	bwc_matcher->matcher = new_matcher;
+	mlx5hws_matcher_destroy(old_matcher);
+
+	return 0;
+}
+
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	u32 num_of_rules;
+	int ret;
+
+	/* If the current matcher size is already at its max size, we can't
+	 * do the rehash. Skip it and try adding the rule again - perhaps
+	 * there was some change.
+	 */
+	if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
+		return 0;
+
+	/* It is possible that another rule has already performed rehash.
+	 * Need to check again whether we really need rehash: if the
+	 * reason for rehash was size but is not any more - skip rehash.
+	 */
+	num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
+	if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
+		return 0;
+
+	/* Now we're done with all the checking - do the rehash:
+	 *  - extend match RTC size
+	 *  - create new matcher
+	 *  - move all the rules to the new matcher
+	 *  - destroy the old matcher
+	 */
+
+	ret = hws_bwc_matcher_extend_size(bwc_matcher);
+	if (ret)
+		return ret;
+
+	return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int
+hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	/* Rehash by action template doesn't require any additional checking.
+	 * The bwc_matcher already contains the new action template.
+	 * Just do the usual rehash:
+	 *  - create new matcher
+	 *  - move all the rules to the new matcher
+	 *  - destroy the old matcher
+	 */
+	return hws_bwc_matcher_move(bwc_matcher);
+}
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+				   u32 *match_param,
+				   struct mlx5hws_rule_action rule_actions[],
+				   u32 flow_source,
+				   u16 bwc_queue_idx)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_rule_attr rule_attr;
+	struct mutex *queue_lock; /* Protect the queue */
+	u32 num_of_rules;
+	int ret = 0;
+	int at_idx;
+
+	mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+	mutex_lock(queue_lock);
+
+	/* check if rehash needed due to missing action template */
+	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+	if (unlikely(at_idx < 0)) {
+		/* we need to extend BWC matcher action templates array */
+		mutex_unlock(queue_lock);
+		hws_bwc_lock_all_queues(ctx);
+
+		ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+		if (unlikely(ret)) {
+			hws_bwc_unlock_all_queues(ctx);
+			return ret;
+		}
+
+		/* action templates array was extended, we need the last idx */
+		at_idx = bwc_matcher->num_of_at - 1;
+
+		ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+						bwc_matcher->at[at_idx]);
+		if (unlikely(ret)) {
+			/* Action template attach failed, possibly due to
+			 * requiring more action STEs.
+			 * Need to attempt creating new matcher with all
+			 * the action templates, including the new one.
+			 */
+			ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+			if (unlikely(ret)) {
+				mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+				bwc_matcher->at[at_idx] = NULL;
+				bwc_matcher->num_of_at--;
+
+				hws_bwc_unlock_all_queues(ctx);
+
+				mlx5hws_err(ctx,
+					    "BWC rule insertion: rehash AT failed (%d)\n", ret);
+				return ret;
+			}
+		}
+
+		hws_bwc_unlock_all_queues(ctx);
+		mutex_lock(queue_lock);
+	}
+
+	/* check if the number of rules requires a rehash */
+	num_of_rules = bwc_matcher->num_of_rules;
+
+	if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+		mutex_unlock(queue_lock);
+
+		hws_bwc_lock_all_queues(ctx);
+		ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+		hws_bwc_unlock_all_queues(ctx);
+
+		if (ret) {
+			mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
+				    bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+				    bwc_matcher->size_log,
+				    ret);
+			return ret;
+		}
+
+		mutex_lock(queue_lock);
+	}
+
+	ret = hws_bwc_rule_create_sync(bwc_rule,
+				       match_param,
+				       at_idx,
+				       rule_actions,
+				       &rule_attr);
+	if (likely(!ret)) {
+		hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+		mutex_unlock(queue_lock);
+		return 0; /* rule inserted successfully */
+	}
+
+	/* At this point the rule wasn't added.
+	 * It could be because there was a collision, or some other
+	 * problem. If we don't dive deeper than the API, the only thing
+	 * we know is that the completion status is MLX5HWS_FLOW_OP_ERROR.
+	 * Try rehash by size and insert the rule again - last chance.
+	 */
+
+	mutex_unlock(queue_lock);
+
+	hws_bwc_lock_all_queues(ctx);
+	ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+	hws_bwc_unlock_all_queues(ctx);
+
+	if (ret) {
+		mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+		return ret;
+	}
+
+	/* Rehash done, but we still have that pesky rule to add */
+	mutex_lock(queue_lock);
+
+	ret = hws_bwc_rule_create_sync(bwc_rule,
+				       match_param,
+				       at_idx,
+				       rule_actions,
+				       &rule_attr);
+
+	if (unlikely(ret)) {
+		mutex_unlock(queue_lock);
+		mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+		return ret;
+	}
+
+	hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+	mutex_unlock(queue_lock);
+
+	return 0;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+			struct mlx5hws_match_parameters *params,
+			u32 flow_source,
+			struct mlx5hws_rule_action rule_actions[])
+{
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_bwc_rule *bwc_rule;
+	u16 bwc_queue_idx;
+	int ret;
+
+	if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+		mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+		return NULL;
+	}
+
+	bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+	if (unlikely(!bwc_rule))
+		return NULL;
+
+	bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+	ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+					     params->match_buf,
+					     rule_actions,
+					     flow_source,
+					     bwc_queue_idx);
+	if (unlikely(ret)) {
+		mlx5hws_bwc_rule_free(bwc_rule);
+		return NULL;
+	}
+
+	return bwc_rule;
+}
+
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+			   struct mlx5hws_rule_action rule_actions[])
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_rule_attr rule_attr;
+	struct mutex *queue_lock; /* Protect the queue */
+	int at_idx, ret;
+	u16 idx;
+
+	idx = bwc_rule->bwc_queue_idx;
+
+	mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+	queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+	mutex_lock(queue_lock);
+
+	/* check if rehash needed due to missing action template */
+	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+	if (unlikely(at_idx < 0)) {
+		/* we need to extend BWC matcher action templates array */
+		mutex_unlock(queue_lock);
+		hws_bwc_lock_all_queues(ctx);
+
+		/* check again - perhaps other thread already did extend_at */
+		at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+		if (likely(at_idx < 0)) {
+			ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+			if (unlikely(ret)) {
+				hws_bwc_unlock_all_queues(ctx);
+				mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
+				return -EINVAL;
+			}
+
+			/* action templates array was extended, we need the last idx */
+			at_idx = bwc_matcher->num_of_at - 1;
+
+			ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher, bwc_matcher->at[at_idx]);
+			if (unlikely(ret)) {
+			/* Action template attach failed, possibly due to
+			 * requiring more action STEs.
+			 * Need to attempt creating new matcher with all
+			 * the action templates, including the new one.
+			 */
+				ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+				if (unlikely(ret)) {
+					mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+					bwc_matcher->at[at_idx] = NULL;
+					bwc_matcher->num_of_at--;
+
+					hws_bwc_unlock_all_queues(ctx);
+
+					mlx5hws_err(ctx, "BWC rule update: rehash AT failed (%d)\n", ret);
+					return ret;
+				}
+			}
+		}
+
+		hws_bwc_unlock_all_queues(ctx);
+		mutex_lock(queue_lock);
+	}
+
+	ret = hws_bwc_rule_update_sync(bwc_rule,
+				       at_idx,
+				       rule_actions,
+				       &rule_attr);
+	mutex_unlock(queue_lock);
+
+	if (unlikely(ret))
+		mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
+
+	return ret;
+}
+
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+				   struct mlx5hws_rule_action rule_actions[])
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+	if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+		mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+		return -EINVAL;
+	}
+
+	return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
new file mode 100644
index 000000000000..4fe8c32d8fbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
@@ -0,0 +1,73 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_H_
+#define MLX5HWS_BWC_H_
+
+#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
+#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
+#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
+#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+#define MLX5HWS_BWC_MAX_ACTS 16
+
+struct mlx5hws_bwc_matcher {
+	struct mlx5hws_matcher *matcher;
+	struct mlx5hws_match_template *mt;
+	struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+	u8 num_of_at;
+	u16 priority;
+	u8 size_log;
+	u32 num_of_rules; /* atomically accessed */
+	struct list_head *rules;
+};
+
+struct mlx5hws_bwc_rule {
+	struct mlx5hws_bwc_matcher *bwc_matcher;
+	struct mlx5hws_rule *rule;
+	u16 bwc_queue_idx;
+	struct list_head list_node;
+};
+
+int
+mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+				  struct mlx5hws_table *table,
+				  u32 priority,
+				  u8 match_criteria_enable,
+				  struct mlx5hws_match_parameters *mask,
+				  enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+				   u32 *match_param,
+				   struct mlx5hws_rule_action rule_actions[],
+				   u32 flow_source,
+				   u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+				u16 bwc_queue_idx,
+				u32 flow_source,
+				struct mlx5hws_rule_attr *rule_attr);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+	/* Besides the control queue, half of the queues are
+	 * regular HWS queues, and the other half are BWC queues.
+	 */
+	return (ctx->queues - 1) / 2;
+}
+
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+	return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* MLX5HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
new file mode 100644
index 000000000000..bb563f50ef09
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -0,0 +1,86 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+					 u8 match_criteria_enable,
+					 struct mlx5hws_match_parameters *mask)
+{
+	struct mlx5hws_definer match_layout = {0};
+	struct mlx5hws_match_template *mt;
+	bool is_complex = false;
+	int ret;
+
+	if (!match_criteria_enable)
+		return false; /* empty matcher */
+
+	mt = mlx5hws_match_template_create(ctx,
+					   mask->match_buf,
+					   mask->match_sz,
+					   match_criteria_enable);
+	if (!mt) {
+		mlx5hws_err(ctx, "BWC: failed creating match template\n");
+		return false;
+	}
+
+	ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+	if (ret) {
+		/* The only case that we're interested in is -E2BIG,
+		 * which means that the match parameters need to be
+		 * split into a complex matcher.
+		 * For all other cases (good or bad) - just return false
+		 * and let the usual matcher creation path handle it,
+		 * both for good and bad flows.
+		 */
+		if (ret == -E2BIG) {
+			is_complex = true;
+			mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+		} else {
+			mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+		}
+	}
+
+	mlx5hws_match_template_destroy(mt);
+
+	return is_complex;
+}
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+				       struct mlx5hws_table *table,
+				       u32 priority,
+				       u8 match_criteria_enable,
+				       struct mlx5hws_match_parameters *mask)
+{
+	mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
+	return -EOPNOTSUPP;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	/* nothing to do here */
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+				    struct mlx5hws_match_parameters *params,
+				    u32 flow_source,
+				    struct mlx5hws_rule_action rule_actions[],
+				    u16 bwc_queue_idx)
+{
+	mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
+		    "Complex rule is not supported yet\n");
+	return -EOPNOTSUPP;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+	return 0;
+}
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+	mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+		    "Moving complex rule is not supported yet\n");
+	return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
new file mode 100644
index 000000000000..068ee8118609
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
@@ -0,0 +1,29 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_COMPLEX_H_
+#define MLX5HWS_BWC_COMPLEX_H_
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+					 u8 match_criteria_enable,
+					 struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+				       struct mlx5hws_table *table,
+				       u32 priority,
+				       u8 match_criteria_enable,
+				       struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+				    struct mlx5hws_match_parameters *params,
+				    u32 flow_source,
+				    struct mlx5hws_rule_action rule_actions[],
+				    u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* MLX5HWS_BWC_COMPLEX_H_ */