
[net-next,05/10] net/mlx5: hw counters: Don't maintain a counter count

Message ID 20240815054656.2210494-6-tariqt@nvidia.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series net/mlx5: hw counters refactor and misc | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 29 this patch: 29
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 1 maintainers not CCed: linux-rdma@vger.kernel.org
netdev/build_clang success Errors and warnings before: 29 this patch: 29
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 29 this patch: 29
netdev/checkpatch warning WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 93 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest warning net-next-2024-08-15--12-00 (tests: 707)

Commit Message

Tariq Toukan Aug. 15, 2024, 5:46 a.m. UTC
From: Cosmin Ratiu <cratiu@nvidia.com>

num_counters is only used to decide whether to grow the bulk query
buffer, which happens once the number of counters exceeds a small
initial threshold. After that point, maintaining num_counters serves no
purpose.

This commit replaces it with an actual xarray traversal that counts the
counters on demand. That looks expensive at first sight, but it only
happens while the number of counters is below the initial threshold (8),
and at most once per sampling interval. Once the number of counters goes
above the threshold, the bulk query buffer is grown to its maximum size
and the xarray traversal is never done again. With num_counters gone,
the explicit xa_lock()/__xa_store()/__xa_erase() sequences are also
replaced by the self-locking xa_store()/xa_erase() variants.

Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 .../ethernet/mellanox/mlx5/core/fs_counters.c | 40 +++++++++----------
 1 file changed, 18 insertions(+), 22 deletions(-)

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index ef13941e55c2..0b80c33cba5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -78,7 +78,6 @@  struct mlx5_fc_stats {
 	unsigned long sampling_interval; /* jiffies */
 	u32 *bulk_query_out;
 	int bulk_query_len;
-	size_t num_counters;  /* Also protected by xarray->xa_lock. */
 	bool bulk_query_alloc_failed;
 	unsigned long next_bulk_query_alloc;
 	struct mlx5_fc_pool fc_pool;
@@ -217,21 +216,28 @@  static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev,
 		       bulk_query_len);
 }
 
+static int mlx5_fc_num_counters(struct mlx5_fc_stats *fc_stats)
+{
+	struct mlx5_fc *counter;
+	int num_counters = 0;
+	unsigned long id;
+
+	xa_for_each(&fc_stats->counters, id, counter)
+		num_counters++;
+	return num_counters;
+}
+
 static void mlx5_fc_stats_work(struct work_struct *work)
 {
 	struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats,
 						      work.work);
 	struct mlx5_core_dev *dev = fc_stats->fc_pool.dev;
-	int num_counters;
 
 	queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval);
 
-	/* num_counters is only needed for determining whether to increase the buffer. */
-	xa_lock(&fc_stats->counters);
-	num_counters = fc_stats->num_counters;
-	xa_unlock(&fc_stats->counters);
-	if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
-	    num_counters > get_init_bulk_query_len(dev))
+	/* Grow the bulk query buffer to max if not maxed and enough counters are present. */
+	if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+		     mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev)))
 		mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));
 
 	mlx5_fc_stats_query_all_counters(dev);
@@ -287,15 +293,9 @@  struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
 		counter->lastbytes = counter->cache.bytes;
 		counter->lastpackets = counter->cache.packets;
 
-		xa_lock(&fc_stats->counters);
-
-		err = xa_err(__xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
-		if (err != 0) {
-			xa_unlock(&fc_stats->counters);
+		err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
+		if (err != 0)
 			goto err_out_alloc;
-		}
-		fc_stats->num_counters++;
-		xa_unlock(&fc_stats->counters);
 	}
 
 	return counter;
@@ -324,12 +324,8 @@  void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
 	if (!counter)
 		return;
 
-	if (counter->aging) {
-		xa_lock(&fc_stats->counters);
-		fc_stats->num_counters--;
-		__xa_erase(&fc_stats->counters, counter->id);
-		xa_unlock(&fc_stats->counters);
-	}
+	if (counter->aging)
+		xa_erase(&fc_stats->counters, counter->id);
 	mlx5_fc_release(dev, counter);
 }
 EXPORT_SYMBOL(mlx5_fc_destroy);
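
A side note on the locking part of the change (a reviewer-oriented
sketch, not text from the patch): num_counters was the only state that
had to be updated atomically together with the xarray, so the
caller-locked __xa_store()/__xa_erase() calls wrapped in explicit
xa_lock()/xa_unlock() pairs can be dropped in favour of the plain
xa_store()/xa_erase() variants, which take xa_lock internally:

/* Before: caller-locked variant, because num_counters had to be
 * updated under the same xa_lock as the store.
 */
xa_lock(&fc_stats->counters);
err = xa_err(__xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
if (!err)
	fc_stats->num_counters++;
xa_unlock(&fc_stats->counters);

/* After: xa_store() acquires xa_lock internally, and there is no other
 * state left to keep in sync with the xarray.
 */
err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));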