
[net-next,05/14] net/mlx5: qos: Maintain rate group vport members in a list

Message ID 20241008183222.137702-6-tariqt@nvidia.com
State Accepted
Commit d3a3b0765e18d78117cbf7b4cd61cd4a6ab2b5e5
Delegated to: Netdev Maintainers
Series net/mlx5: qos: Refactor esw qos to support new features

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 6 this patch: 6
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 1 maintainers not CCed: linux-rdma@vger.kernel.org
netdev/build_clang success Errors and warnings before: 6 this patch: 6
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 5 this patch: 5
netdev/checkpatch warning WARNING: line length of 83 exceeds 80 columns; WARNING: line length of 85 exceeds 80 columns; WARNING: line length of 96 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 1 this patch: 1
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-10-10--09-00 (tests: 775)

Commit Message

Tariq Toukan Oct. 8, 2024, 6:32 p.m. UTC
From: Cosmin Ratiu <cratiu@nvidia.com>

Previously, finding the members of a group required iterating over all
vports of an eswitch and comparing each vport's group with the required
one. That approach breaks down once a group can contain vports from
multiple eswitches.

Solve that by maintaining a list of vport members in each group.
Instead of iterating over esw vports, loop over the members list.
Use this opportunity to provide two new functions that allocate and free
a group, so that group creation and destruction go through fewer
intermediate states. These helpers will also be used in a future patch.
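
For illustration only (not part of the patch), a minimal sketch of the
membership pattern using the kernel's intrusive list API; struct and
function names follow the diff below, while group_max_guarantee() is a
hypothetical condensation of esw_qos_calculate_group_min_rate_divider():

#include <linux/list.h>		/* intrusive list primitives */

struct mlx5_esw_rate_group {
	/* Vport members of this group (anchors vport->qos.group_entry). */
	struct list_head members;
	/* ... other fields elided ... */
};

struct mlx5_vport {
	struct {
		struct mlx5_esw_rate_group *group;
		/* Linkage on group->members; INIT_LIST_HEAD() it once,
		 * before the first esw_qos_vport_set_group() call.
		 */
		struct list_head group_entry;
		u32 min_rate;
		/* ... */
	} qos;
	/* ... */
};

/* Move a vport from its current group's member list to @group's list. */
static void esw_qos_vport_set_group(struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group)
{
	list_del_init(&vport->qos.group_entry);
	vport->qos.group = group;
	list_add_tail(&vport->qos.group_entry, &group->members);
}

/* Group-local walk replaces the old per-eswitch vport scan. */
static u32 group_max_guarantee(struct mlx5_esw_rate_group *group)
{
	struct mlx5_vport *vport;
	u32 max_guarantee = 0;

	list_for_each_entry(vport, &group->members, qos.group_entry) {
		if (vport->qos.min_rate > max_guarantee)
			max_guarantee = vport->qos.min_rate;
	}
	return max_guarantee;
}

Keeping the list intrusive makes membership changes O(1) and lets every
group-scoped computation iterate only over its own members, regardless
of which eswitch a vport belongs to.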

Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 94 +++++++++++--------
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |  1 +
 2 files changed, 58 insertions(+), 37 deletions(-)

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index a8231a498ed6..cfff1413dcfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -20,8 +20,17 @@  struct mlx5_esw_rate_group {
 	/* A computed value indicating relative min_rate between group members. */
 	u32 bw_share;
 	struct list_head list;
+	/* Vport members of this group.*/
+	struct list_head members;
 };
 
+static void esw_qos_vport_set_group(struct mlx5_vport *vport, struct mlx5_esw_rate_group *group)
+{
+	list_del_init(&vport->qos.group_entry);
+	vport->qos.group = group;
+	list_add_tail(&vport->qos.group_entry, &group->members);
+}
+
 static int esw_qos_sched_elem_config(struct mlx5_core_dev *dev, u32 sched_elem_ix,
 				     u32 max_rate, u32 bw_share)
 {
@@ -89,17 +98,13 @@  static u32 esw_qos_calculate_group_min_rate_divider(struct mlx5_eswitch *esw,
 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
 	struct mlx5_vport *vport;
 	u32 max_guarantee = 0;
-	unsigned long i;
-
 
 	/* Find max min_rate across all vports in this group.
 	 * This will correspond to fw_max_bw_share in the final bw_share calculation.
 	 */
-	mlx5_esw_for_each_vport(esw, i, vport) {
-		if (!vport->enabled || !vport->qos.enabled ||
-		    vport->qos.group != group || vport->qos.min_rate < max_guarantee)
-			continue;
-		max_guarantee = vport->qos.min_rate;
+	list_for_each_entry(vport, &group->members, qos.group_entry) {
+		if (vport->qos.min_rate > max_guarantee)
+			max_guarantee = vport->qos.min_rate;
 	}
 
 	if (max_guarantee)
@@ -155,13 +160,10 @@  static int esw_qos_normalize_group_min_rate(struct mlx5_eswitch *esw,
 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
 	u32 divider = esw_qos_calculate_group_min_rate_divider(esw, group);
 	struct mlx5_vport *vport;
-	unsigned long i;
 	u32 bw_share;
 	int err;
 
-	mlx5_esw_for_each_vport(esw, i, vport) {
-		if (!vport->enabled || !vport->qos.enabled || vport->qos.group != group)
-			continue;
+	list_for_each_entry(vport, &group->members, qos.group_entry) {
 		bw_share = esw_qos_calc_bw_share(vport->qos.min_rate, divider, fw_max_bw_share);
 
 		if (bw_share == vport->qos.bw_share)
@@ -295,7 +297,6 @@  static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
 				      u32 max_rate, struct netlink_ext_ack *extack)
 {
 	struct mlx5_vport *vport;
-	unsigned long i;
 	int err;
 
 	if (group->max_rate == max_rate)
@@ -308,9 +309,8 @@  static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
 	group->max_rate = max_rate;
 
 	/* Any unlimited vports in the group should be set with the value of the group. */
-	mlx5_esw_for_each_vport(esw, i, vport) {
-		if (!vport->enabled || !vport->qos.enabled ||
-		    vport->qos.group != group || vport->qos.max_rate)
+	list_for_each_entry(vport, &group->members, qos.group_entry) {
+		if (vport->qos.max_rate)
 			continue;
 
 		err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack);
@@ -395,7 +395,7 @@  static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw,
 		return err;
 	}
 
-	vport->qos.group = new_group;
+	esw_qos_vport_set_group(vport, new_group);
 	/* Use new group max rate if vport max rate is unlimited. */
 	max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate;
 	err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share);
@@ -407,7 +407,7 @@  static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw,
 	return 0;
 
 err_sched:
-	vport->qos.group = curr_group;
+	esw_qos_vport_set_group(vport, curr_group);
 	max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_group->max_rate;
 	if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share))
 		esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n",
@@ -446,16 +446,33 @@  static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
 }
 
 static struct mlx5_esw_rate_group *
-__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+__esw_qos_alloc_rate_group(struct mlx5_eswitch *esw, u32 tsar_ix)
 {
-	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
 	struct mlx5_esw_rate_group *group;
-	void *attr;
-	int err;
 
 	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	if (!group)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
+
+	group->tsar_ix = tsar_ix;
+	INIT_LIST_HEAD(&group->members);
+	list_add_tail(&group->list, &esw->qos.groups);
+	return group;
+}
+
+static void __esw_qos_free_rate_group(struct mlx5_esw_rate_group *group)
+{
+	list_del(&group->list);
+	kfree(group);
+}
+
+static struct mlx5_esw_rate_group *
+__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+	struct mlx5_esw_rate_group *group;
+	int tsar_ix, err;
+	void *attr;
 
 	MLX5_SET(scheduling_context, tsar_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
@@ -466,13 +483,18 @@  __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
 	err = mlx5_create_scheduling_element_cmd(esw->dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
 						 tsar_ctx,
-						 &group->tsar_ix);
+						 &tsar_ix);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed");
-		goto err_sched_elem;
+		return ERR_PTR(err);
 	}
 
-	list_add_tail(&group->list, &esw->qos.groups);
+	group = __esw_qos_alloc_rate_group(esw, tsar_ix);
+	if (!group) {
+		NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc group failed");
+		err = -ENOMEM;
+		goto err_alloc_group;
+	}
 
 	err = esw_qos_normalize_min_rate(esw, extack);
 	if (err) {
@@ -484,13 +506,12 @@  __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
 	return group;
 
 err_min_rate:
-	list_del(&group->list);
+	__esw_qos_free_rate_group(group);
+err_alloc_group:
 	if (mlx5_destroy_scheduling_element_cmd(esw->dev,
 						SCHEDULING_HIERARCHY_E_SWITCH,
-						group->tsar_ix))
+						tsar_ix))
 		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
-err_sched_elem:
-	kfree(group);
 	return ERR_PTR(err);
 }
 
@@ -523,21 +544,19 @@  static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
 {
 	int err;
 
-	list_del(&group->list);
-
-	err = esw_qos_normalize_min_rate(esw, extack);
-	if (err)
-		NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
+	trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
 
 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
 						  SCHEDULING_HIERARCHY_E_SWITCH,
 						  group->tsar_ix);
 	if (err)
 		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
+	__esw_qos_free_rate_group(group);
 
-	trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
+	err = esw_qos_normalize_min_rate(esw, extack);
+	if (err)
+		NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
 
-	kfree(group);
 
 	return err;
 }
@@ -655,7 +674,8 @@  static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
 	if (err)
 		return err;
 
-	vport->qos.group = esw->qos.group0;
+	INIT_LIST_HEAD(&vport->qos.group_entry);
+	esw_qos_vport_set_group(vport, esw->qos.group0);
 
 	err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ce857eae6898..f208ae16bfd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -220,6 +220,7 @@  struct mlx5_vport {
 		/* A computed value indicating relative min_rate between vports in a group. */
 		u32 bw_share;
 		struct mlx5_esw_rate_group *group;
+		struct list_head group_entry;
 	} qos;
 
 	u16 vport;