@@ -208,6 +208,18 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_fast) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_slow) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_slow_high_order) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_refill) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_cached) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_ring) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_ring_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
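Each sw_stats_desc entry pairs an ethtool string with a byte offset into struct mlx5e_sw_stats, so adding a counter is purely declarative. As a rough sketch of the descriptor plumbing already in mlx5e_stats.h (simplified here for illustration, not part of this patch):

	struct counter_desc {
		char	format[ETH_GSTRING_LEN];	/* name reported by "ethtool -S" */
		size_t	offset;				/* byte offset into the stats struct */
	};

	#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)

	/* Reading entry i back out is then a single offset-based load: */
	#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
		(*(u64 *)((char *)(ptr) + (dsc)[i].offset))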
@@ -389,6 +401,18 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
+
+ s->rx_pp_hd_alloc_fast += rq_stats->pp_hd_alloc_fast;
+ s->rx_pp_hd_alloc_slow += rq_stats->pp_hd_alloc_slow;
+ s->rx_pp_hd_alloc_empty += rq_stats->pp_hd_alloc_empty;
+ s->rx_pp_hd_alloc_refill += rq_stats->pp_hd_alloc_refill;
+ s->rx_pp_hd_alloc_waive += rq_stats->pp_hd_alloc_waive;
+ s->rx_pp_hd_alloc_slow_high_order += rq_stats->pp_hd_alloc_slow_high_order;
+ s->rx_pp_hd_recycle_cached += rq_stats->pp_hd_recycle_cached;
+ s->rx_pp_hd_recycle_cache_full += rq_stats->pp_hd_recycle_cache_full;
+ s->rx_pp_hd_recycle_ring += rq_stats->pp_hd_recycle_ring;
+ s->rx_pp_hd_recycle_ring_full += rq_stats->pp_hd_recycle_ring_full;
+ s->rx_pp_hd_recycle_released_ref += rq_stats->pp_hd_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
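The new counters are accumulated with += because this helper runs once per RQ: the caller walks all channels and folds every ring's counters into the single device-wide struct mlx5e_sw_stats. A hedged sketch of that driving loop (the wrapper name is illustrative; the channel-stats layout is condensed from the driver and may differ in detail):

	/* Illustrative only: fold per-ring RQ counters into the sw stats. */
	static void mlx5e_fold_rq_sw_stats(struct mlx5e_priv *priv,
					   struct mlx5e_sw_stats *s)
	{
		int i;

		for (i = 0; i < priv->stats_nch; i++) {
			struct mlx5e_channel_stats *cs = priv->channel_stats[i];

			mlx5e_stats_grp_sw_update_stats_rq_stats(s, &cs->rq);
		}
	}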
@@ -518,6 +542,27 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+
+	pool = c->rq.hd_page_pool;
+	/* page_pool_get_stats() accumulates into @stats, so clear the
+	 * regular pool's counters before reading the header pool's.
+	 */
+	memset(&stats, 0, sizeof(stats));
+	if (!pool || !page_pool_get_stats(pool, &stats))
+		return;
+
+ rq_stats->pp_hd_alloc_fast = stats.alloc_stats.fast;
+ rq_stats->pp_hd_alloc_slow = stats.alloc_stats.slow;
+ rq_stats->pp_hd_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+ rq_stats->pp_hd_alloc_empty = stats.alloc_stats.empty;
+ rq_stats->pp_hd_alloc_waive = stats.alloc_stats.waive;
+ rq_stats->pp_hd_alloc_refill = stats.alloc_stats.refill;
+
+ rq_stats->pp_hd_recycle_cached = stats.recycle_stats.cached;
+ rq_stats->pp_hd_recycle_cache_full = stats.recycle_stats.cache_full;
+ rq_stats->pp_hd_recycle_ring = stats.recycle_stats.ring;
+ rq_stats->pp_hd_recycle_ring_full = stats.recycle_stats.ring_full;
+ rq_stats->pp_hd_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
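(When CONFIG_PAGE_POOL_STATS is off, the #else branch above stubs this function out so callers stay free of ifdefs.) One detail worth calling out: page_pool_get_stats() adds into the caller's struct page_pool_stats rather than overwriting it; its kernel-doc notes that the caller is responsible for initializing the stats. That contract exists precisely so a driver can sum several pools into one struct when it wants a combined view, and it is why the struct is cleared above before the header pool is read; without the reset, the pp_hd_* counters would also contain the data pool's totals. A hedged fragment showing the accumulate behaviour (the pool names here are illustrative, not driver fields):

	/* Illustrative only: sum two pools into one page_pool_stats,
	 * relying on page_pool_get_stats() accumulating its results.
	 */
	static void sum_pool_stats(struct page_pool *data_pool,
				   struct page_pool *hdr_pool,
				   struct page_pool_stats *total)
	{
		memset(total, 0, sizeof(*total));
		/* Each successful call adds that pool's counters into @total. */
		page_pool_get_stats(data_pool, total);
		page_pool_get_stats(hdr_pool, total);
	}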
@@ -2098,6 +2143,18 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_fast) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_slow) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_slow_high_order) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_refill) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_cached) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_ring) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_ring_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
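The per-ring table uses the RX variant of the macro, which prepends a ring prefix to the string; roughly, from mlx5e_stats.h:

	#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)

The %d is filled with the channel index when the strings are emitted, so each counter above appears once per ring, e.g. as rx0_pp_hd_alloc_fast, alongside the global rx_pp_hd_* sums from sw_stats_desc.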
@@ -227,6 +227,18 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
+
+ u64 rx_pp_hd_alloc_fast;
+ u64 rx_pp_hd_alloc_slow;
+ u64 rx_pp_hd_alloc_slow_high_order;
+ u64 rx_pp_hd_alloc_empty;
+ u64 rx_pp_hd_alloc_refill;
+ u64 rx_pp_hd_alloc_waive;
+ u64 rx_pp_hd_recycle_cached;
+ u64 rx_pp_hd_recycle_cache_full;
+ u64 rx_pp_hd_recycle_ring;
+ u64 rx_pp_hd_recycle_ring_full;
+ u64 rx_pp_hd_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
@@ -393,6 +405,18 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
+
+ u64 pp_hd_alloc_fast;
+ u64 pp_hd_alloc_slow;
+ u64 pp_hd_alloc_slow_high_order;
+ u64 pp_hd_alloc_empty;
+ u64 pp_hd_alloc_refill;
+ u64 pp_hd_alloc_waive;
+ u64 pp_hd_recycle_cached;
+ u64 pp_hd_recycle_cache_full;
+ u64 pp_hd_recycle_ring;
+ u64 pp_hd_recycle_ring_full;
+ u64 pp_hd_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
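With both structs extended, the new counters flow through the existing ethtool stats path with no further wiring: the per-RQ pp_hd_* fields are snapshotted from the RQ's header page pool, folded into the device-wide rx_pp_hd_* totals, and rendered via the descriptor tables above. On a device using the header page pool, they can be inspected with something like "ethtool -S <ifname> | grep pp_hd", which should list the global rx_pp_hd_* sums plus one rx<N>_pp_hd_* set per ring.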