diff mbox series

[net-next,09/13] net/mlx5: fs, add HWS fte API functions

Message ID 20250107060708.1610882-10-tariqt@nvidia.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series mlx5 HW-Managed Flow Steering in FS core level | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 1 maintainers not CCed: linux-rdma@vger.kernel.org
netdev/build_clang success Errors and warnings before: 73 this patch: 73
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2 this patch: 2
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Tariq Toukan Jan. 7, 2025, 6:07 a.m. UTC
From: Moshe Shemesh <moshe@nvidia.com>

Add create, destroy and update fte API functions for adding, removing
and updating flow steering rules in HW Steering mode. Get HWS actions
according to required rule, use actions from pool whenever possible.

Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/fs_core.h |   5 +-
 .../mellanox/mlx5/core/steering/hws/fs_hws.c  | 543 ++++++++++++++++++
 .../mellanox/mlx5/core/steering/hws/fs_hws.h  |  13 +
 3 files changed, 560 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b6543a53d7c3..db0458b46390 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -254,7 +254,10 @@  struct fs_fte_dup {
 /* Type of children is mlx5_flow_rule */
 struct fs_fte {
 	struct fs_node			node;
-	struct mlx5_fs_dr_rule		fs_dr_rule;
+	union {
+		struct mlx5_fs_dr_rule		fs_dr_rule;
+		struct mlx5_fs_hws_rule		fs_hws_rule;
+	};
 	u32				val[MLX5_ST_SZ_DW_MATCH_PARAM];
 	struct fs_fte_action		act_dests;
 	struct fs_fte_dup		*dup;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 6ee902999a01..e142e350160a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -357,6 +357,546 @@  static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
 	return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
 }
 
+static struct mlx5hws_action *
+get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
+		   struct mlx5_flow_rule *dst)
+{
+	return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
+}
+
+static struct mlx5hws_action *
+get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+			  struct mlx5_flow_rule *dst)
+{
+	u32 table_num = dst->dest_attr.ft_num;
+
+	return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
+}
+
+static struct mlx5hws_action *
+create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+			     struct mlx5_flow_rule *dst)
+{
+	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+	u32 table_num = dst->dest_attr.ft_num;
+
+	return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
+}
+
+static struct mlx5hws_action *
+create_dest_action_range(struct mlx5hws_context *ctx, struct mlx5_flow_rule *dst)
+{
+	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+	return mlx5hws_action_create_dest_match_range(ctx,
+						      dest_attr->range.field,
+						      dest_attr->range.hit_ft,
+						      dest_attr->range.miss_ft,
+						      dest_attr->range.min,
+						      dest_attr->range.max,
+						      flags);
+}
+
+static struct mlx5hws_action *
+create_action_dest_array(struct mlx5hws_context *ctx,
+			 struct mlx5hws_action_dest_attr *dests,
+			 u32 num_of_dests, bool ignore_flow_level,
+			 u32 flow_source)
+{
+	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+	return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
+						ignore_flow_level,
+						flow_source, flags);
+}
+
+static struct mlx5hws_action *
+get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+	return fs_ctx->hws_pool.push_vlan_action;
+}
+
+static u32 calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
+{
+	u16 n_ethtype = vlan->ethtype;
+	u8 prio = vlan->prio;
+	u16 vid = vlan->vid;
+
+	return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+}
+
+static struct mlx5hws_action *
+get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+	return fs_ctx->hws_pool.pop_vlan_action;
+}
+
+static struct mlx5hws_action *
+get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
+{
+	return fs_ctx->hws_pool.decapl2_action;
+}
+
+static struct mlx5hws_action *
+get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
+{
+	return fs_ctx->hws_pool.drop_action;
+}
+
+static struct mlx5hws_action *
+get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
+{
+	return fs_ctx->hws_pool.tag_action;
+}
+
+static struct mlx5hws_action *
+create_action_last(struct mlx5hws_context *ctx)
+{
+	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+	return mlx5hws_action_create_last(ctx, flags);
+}
+
+static void destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
+{
+	switch (mlx5hws_action_get_type(fs_action->action)) {
+	case MLX5HWS_ACTION_TYP_CTR:
+		mlx5_fc_put_hws_action(fs_action->counter);
+		break;
+	default:
+		mlx5hws_action_destroy(fs_action->action);
+	}
+}
+
+static void destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
+			       int *num_fs_actions)
+{
+	int i;
+
+	/* Free in reverse order to handle action dependencies */
+	for (i = *num_fs_actions - 1; i >= 0; i--)
+		destroy_fs_action(*fs_actions + i);
+	*num_fs_actions = 0;
+	kfree(*fs_actions);
+	*fs_actions = NULL;
+}
+
+/* Splits FTE's actions into cached, rule and destination actions.
+ * The cached and destination actions are saved on the fte hws rule.
+ * The rule actions are returned as a parameter, together with their count.
+ * We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ * 32 is a SW array-size limit; HWS itself allows up to 16M STEs per matcher.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
+static int fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
+			       struct mlx5_flow_table *ft,
+			       struct mlx5_flow_group *group,
+			       struct fs_fte *fte,
+			       struct mlx5hws_rule_action **ractions)
+{
+	struct mlx5_flow_act *fte_action = &fte->act_dests.action;
+	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+	struct mlx5hws_action_dest_attr *dest_actions;
+	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+	struct mlx5_fs_hws_rule_action *fs_actions;
+	struct mlx5_core_dev *dev = ns->dev;
+	struct mlx5hws_action *dest_action;
+	struct mlx5hws_action *tmp_action;
+	struct mlx5_fs_hws_pr *pr_data;
+	struct mlx5_fs_hws_mh *mh_data;
+	bool delay_encap_set = false;
+	struct mlx5_flow_rule *dst;
+	int num_dest_actions = 0;
+	int num_fs_actions = 0;
+	int num_actions = 0;
+	int err;
+
+	*ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
+			    GFP_KERNEL);
+	if (!*ractions) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+			     sizeof(*fs_actions), GFP_KERNEL);
+	if (!fs_actions) {
+		err = -ENOMEM;
+		goto free_actions_alloc;
+	}
+
+	dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+			       sizeof(*dest_actions), GFP_KERNEL);
+	if (!dest_actions) {
+		err = -ENOMEM;
+		goto free_fs_actions_alloc;
+	}
+
+	/* The order of the actions must be kept; only the following
+	 * order is supported by HW steering:
+	 * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
+	 *      -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
+	 *      -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
+	 */
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+		tmp_action = get_action_decap_tnl_l2_to_l2(fs_ctx);
+		if (!tmp_action) {
+			err = -ENOMEM;
+			goto free_dest_actions_alloc;
+		}
+		(*ractions)[num_actions++].action = tmp_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+		int reformat_type = fte_action->pkt_reformat->reformat_type;
+
+		if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+			mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
+			err = -EINVAL;
+			goto free_actions;
+		}
+
+		if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
+			pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+			(*ractions)[num_actions].reformat.offset = pr_data->offset;
+			(*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
+			(*ractions)[num_actions].reformat.data = pr_data->data;
+			(*ractions)[num_actions++].action =
+				fte_action->pkt_reformat->fs_hws_action.hws_action;
+		} else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
+			(*ractions)[num_actions++].action =
+				fte_action->pkt_reformat->fs_hws_action.hws_action;
+		} else {
+			delay_encap_set = true;
+		}
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+		tmp_action = get_action_pop_vlan(fs_ctx);
+		if (!tmp_action) {
+			err = -ENOMEM;
+			goto free_actions;
+		}
+		(*ractions)[num_actions++].action = tmp_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+		tmp_action = get_action_pop_vlan(fs_ctx);
+		if (!tmp_action) {
+			err = -ENOMEM;
+			goto free_actions;
+		}
+		(*ractions)[num_actions++].action = tmp_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+		mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
+		(*ractions)[num_actions].modify_header.offset = mh_data->offset;
+		(*ractions)[num_actions].modify_header.data = mh_data->data;
+		(*ractions)[num_actions++].action =
+			fte_action->modify_hdr->fs_hws_action.hws_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+		tmp_action = get_action_push_vlan(fs_ctx);
+		if (!tmp_action) {
+			err = -ENOMEM;
+			goto free_actions;
+		}
+		(*ractions)[num_actions].push_vlan.vlan_hdr =
+			htonl(calc_vlan_hdr(&fte_action->vlan[0]));
+		(*ractions)[num_actions++].action = tmp_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+		tmp_action = get_action_push_vlan(fs_ctx);
+		if (!tmp_action) {
+			err = -ENOMEM;
+			goto free_actions;
+		}
+		(*ractions)[num_actions].push_vlan.vlan_hdr =
+			htonl(calc_vlan_hdr(&fte_action->vlan[1]));
+		(*ractions)[num_actions++].action = tmp_action;
+	}
+
+	if (delay_encap_set) {
+		pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+		(*ractions)[num_actions].reformat.offset = pr_data->offset;
+		(*ractions)[num_actions].reformat.data = pr_data->data;
+		(*ractions)[num_actions++].action =
+			fte_action->pkt_reformat->fs_hws_action.hws_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		list_for_each_entry(dst, &fte->node.children, node.list) {
+			struct mlx5_fc *counter;
+
+			if (dst->dest_attr.type !=
+			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+				continue;
+
+			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+				err = -EOPNOTSUPP;
+				goto free_actions;
+			}
+
+			counter = dst->dest_attr.counter;
+			tmp_action = mlx5_fc_get_hws_action(ctx, counter);
+			if (!tmp_action) {
+				err = -EINVAL;
+				goto free_actions;
+			}
+
+			(*ractions)[num_actions].counter.offset =
+				mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
+			(*ractions)[num_actions++].action = tmp_action;
+			fs_actions[num_fs_actions].action = tmp_action;
+			fs_actions[num_fs_actions++].counter = counter;
+		}
+	}
+
+	if (fte->act_dests.flow_context.flow_tag) {
+		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+			err = -EOPNOTSUPP;
+			goto free_actions;
+		}
+		tmp_action = get_action_tag(fs_ctx);
+		if (!tmp_action) {
+			err = -ENOMEM;
+			goto free_actions;
+		}
+		(*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
+		(*ractions)[num_actions++].action = tmp_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+		err = -EOPNOTSUPP;
+		goto free_actions;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+		dest_action = get_dest_action_drop(fs_ctx);
+		if (!dest_action) {
+			err = -ENOMEM;
+			goto free_actions;
+		}
+		dest_actions[num_dest_actions++].dest = dest_action;
+	}
+
+	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		list_for_each_entry(dst, &fte->node.children, node.list) {
+			struct mlx5_flow_destination *attr = &dst->dest_attr;
+
+			if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+			    num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+				err = -EOPNOTSUPP;
+				goto free_actions;
+			}
+			if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+				continue;
+
+			switch (attr->type) {
+			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+				dest_action = get_dest_action_ft(fs_ctx, dst);
+				break;
+			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+				dest_action = get_dest_action_table_num(fs_ctx, dst);
+				if (dest_action)
+					break;
+				dest_action = create_dest_action_table_num(fs_ctx, dst);
+				fs_actions[num_fs_actions++].action = dest_action;
+				break;
+			case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+				dest_action = create_dest_action_range(ctx, dst);
+				fs_actions[num_fs_actions++].action = dest_action;
+				break;
+			default:
+				err = -EOPNOTSUPP;
+				goto free_actions;
+			}
+			if (!dest_action) {
+				err = -ENOMEM;
+				goto free_actions;
+			}
+			dest_actions[num_dest_actions++].dest = dest_action;
+		}
+	}
+
+	if (num_dest_actions == 1) {
+		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+			err = -EOPNOTSUPP;
+			goto free_actions;
+		}
+		(*ractions)[num_actions++].action = dest_actions->dest;
+	} else if (num_dest_actions > 1) {
+		bool ignore_flow_level =
+			!!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+
+		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+		    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+			err = -EOPNOTSUPP;
+			goto free_actions;
+		}
+		tmp_action = create_action_dest_array(ctx, dest_actions,
+						      num_dest_actions,
+						      ignore_flow_level,
+						      fte->act_dests.flow_context.flow_source);
+		if (!tmp_action) {
+			err = -EOPNOTSUPP;
+			goto free_actions;
+		}
+		fs_actions[num_fs_actions++].action = tmp_action;
+		(*ractions)[num_actions++].action = tmp_action;
+	}
+
+	if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+	    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+		err = -EOPNOTSUPP;
+		goto free_actions;
+	}
+
+	tmp_action = create_action_last(ctx);
+	if (!tmp_action) {
+		err = -ENOMEM;
+		goto free_actions;
+	}
+	fs_actions[num_fs_actions++].action = tmp_action;
+	(*ractions)[num_actions++].action = tmp_action;
+
+	kfree(dest_actions);
+
+	/* Actions created specifically for this rule will be destroyed
+	 * once the rule is deleted.
+	 */
+	fte->fs_hws_rule.num_fs_actions = num_fs_actions;
+	fte->fs_hws_rule.hws_fs_actions = fs_actions;
+
+	return 0;
+
+free_actions:
+	destroy_fs_actions(&fs_actions, &num_fs_actions);
+free_dest_actions_alloc:
+	kfree(dest_actions);
+free_fs_actions_alloc:
+	kfree(fs_actions);
+free_actions_alloc:
+	kfree(*ractions);
+	*ractions = NULL;
+out_err:
+	return err;
+}
+
+static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
+				   struct mlx5_flow_table *ft,
+				   struct mlx5_flow_group *group,
+				   struct fs_fte *fte)
+{
+	struct mlx5hws_match_parameters params;
+	struct mlx5hws_rule_action *ractions;
+	struct mlx5hws_bwc_rule *rule;
+	int err = 0;
+
+	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+		/* Packet reformat on termination table not supported yet */
+		if (fte->act_dests.action.action &
+		    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+			return -EOPNOTSUPP;
+		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+	}
+
+	err = fte_get_hws_actions(ns, ft, group, fte, &ractions);
+	if (err)
+		goto out_err;
+
+	params.match_sz = sizeof(fte->val);
+	params.match_buf = fte->val;
+
+	rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
+				       fte->act_dests.flow_context.flow_source,
+				       ractions);
+	kfree(ractions);
+	if (!rule) {
+		err = -EINVAL;
+		goto free_actions;
+	}
+
+	fte->fs_hws_rule.bwc_rule = rule;
+	return 0;
+
+free_actions:
+	destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+			   &fte->fs_hws_rule.num_fs_actions);
+out_err:
+	mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
+	return err;
+}
+
+static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
+				   struct mlx5_flow_table *ft,
+				   struct fs_fte *fte)
+{
+	struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
+	int err;
+
+	if (mlx5_fs_cmd_is_fw_term_table(ft))
+		return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+	err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
+	rule->bwc_rule = NULL;
+
+	destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
+
+	return err;
+}
+
+static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
+				   struct mlx5_flow_table *ft,
+				   struct mlx5_flow_group *group,
+				   int modify_mask,
+				   struct fs_fte *fte)
+{
+	int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
+		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+	struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
+	struct mlx5hws_rule_action *ractions;
+	int saved_num_fs_actions;
+	int ret;
+
+	if (mlx5_fs_cmd_is_fw_term_table(ft))
+		return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
+							     modify_mask, fte);
+
+	if ((modify_mask & ~allowed_mask) != 0)
+		return -EINVAL;
+
+	saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
+	saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
+
+	ret = fte_get_hws_actions(ns, ft, group, fte, &ractions);
+	if (ret)
+		return ret;
+
+	ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
+	kfree(ractions);
+	if (ret)
+		goto restore_actions;
+
+	destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
+	return ret;
+
+restore_actions:
+	destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+			   &fte->fs_hws_rule.num_fs_actions);
+	fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
+	fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
+	return ret;
+}
+
 static struct mlx5hws_action *
 create_action_remove_header_vlan(struct mlx5hws_context *ctx)
 {
@@ -712,6 +1252,9 @@  static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
 	.update_root_ft = mlx5_cmd_hws_update_root_ft,
 	.create_flow_group = mlx5_cmd_hws_create_flow_group,
 	.destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
+	.create_fte = mlx5_cmd_hws_create_fte,
+	.delete_fte = mlx5_cmd_hws_delete_fte,
+	.update_fte = mlx5_cmd_hws_update_fte,
 	.packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
 	.packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
 	.modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
index c9807abd6c25..d260b14e3963 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -43,6 +43,19 @@  struct mlx5_fs_hws_matcher {
 	struct mlx5hws_bwc_matcher *matcher;
 };
 
+struct mlx5_fs_hws_rule_action {
+	struct mlx5hws_action *action;
+	union {
+		struct mlx5_fc *counter;
+	};
+};
+
+struct mlx5_fs_hws_rule {
+	struct mlx5hws_bwc_rule *bwc_rule;
+	struct mlx5_fs_hws_rule_action *hws_fs_actions;
+	int num_fs_actions;
+};
+
 #ifdef CONFIG_MLX5_HW_STEERING
 
 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);