[net-next,v8,4/4] mlx5: add support for page_pool_get_stats

Message ID 1646172610-129397-5-git-send-email-jdamato@fastly.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series page_pool: Add stats counters

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/cc_maintainers success CCed 6 of 6 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1 this patch: 1
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Joe Damato March 1, 2022, 10:10 p.m. UTC
This change adds support for the page_pool_get_stats API to mlx5. If the
user has enabled CONFIG_PAGE_POOL_STATS in their kernel, ethtool will
output page pool stats.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 75 ++++++++++++++++++++++
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 27 +++++++-
 2 files changed, 101 insertions(+), 1 deletion(-)

Comments

Saeed Mahameed March 2, 2022, 1:02 a.m. UTC | #1
On 01 Mar 14:10, Joe Damato wrote:
>This change adds support for the page_pool_get_stats API to mlx5. If the
>user has enabled CONFIG_PAGE_POOL_STATS in their kernel, ethtool will
>output page pool stats.
>

I was hoping to see something other than ethtool, a driver-less approach:
page_pool is a first-class citizen; it collects its own stats and should be
able to report them without the need for driver help.

I understand these stats are per driver ring, but we can always come up with
a naming convention in the page pool that allows correlating page-pool stats
with per-ring driver stats.

Anyway, I can't think of a simple hack, so this patch is a good temporary
compromise until we come up with the right approach.

>Signed-off-by: Joe Damato <jdamato@fastly.com>
>---
> drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 75 ++++++++++++++++++++++
> drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 27 +++++++-
> 2 files changed, 101 insertions(+), 1 deletion(-)
>
>diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
>index 3e5d8c7..eb518ec 100644
>--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
>+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
>@@ -37,6 +37,10 @@
> #include "en/ptp.h"
> #include "en/port.h"
>
>+#ifdef CONFIG_PAGE_POOL_STATS
>+#include <net/page_pool.h>
>+#endif
>+
> static unsigned int stats_grps_num(struct mlx5e_priv *priv)
> {
> 	return !priv->profile->stats_grps_num ? 0 :
>@@ -183,6 +187,19 @@ static const struct counter_desc sw_stats_desc[] = {
> 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
> 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
> 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
>+#ifdef CONFIG_PAGE_POOL_STATS
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
>+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
>+#endif
> #ifdef CONFIG_MLX5_EN_TLS
> 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
> 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
>@@ -349,6 +366,19 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
> 	s->rx_congst_umr              += rq_stats->congst_umr;
> 	s->rx_arfs_err                += rq_stats->arfs_err;
> 	s->rx_recover                 += rq_stats->recover;
>+#ifdef CONFIG_PAGE_POOL_STATS
>+	s->rx_pp_alloc_fast          += rq_stats->pp_alloc_fast;
>+	s->rx_pp_alloc_slow          += rq_stats->pp_alloc_slow;
>+	s->rx_pp_alloc_empty         += rq_stats->pp_alloc_empty;
>+	s->rx_pp_alloc_refill        += rq_stats->pp_alloc_refill;
>+	s->rx_pp_alloc_waive         += rq_stats->pp_alloc_waive;
>+	s->rx_pp_alloc_slow_high_order		+= rq_stats->pp_alloc_slow_high_order;
>+	s->rx_pp_recycle_cached			+= rq_stats->pp_recycle_cached;
>+	s->rx_pp_recycle_cache_full		+= rq_stats->pp_recycle_cache_full;
>+	s->rx_pp_recycle_ring			+= rq_stats->pp_recycle_ring;
>+	s->rx_pp_recycle_ring_full		+= rq_stats->pp_recycle_ring_full;
>+	s->rx_pp_recycle_released_ref		+= rq_stats->pp_recycle_released_ref;
>+#endif
> #ifdef CONFIG_MLX5_EN_TLS
> 	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
> 	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
>@@ -455,6 +485,35 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
> 	}
> }
>
>+#ifdef CONFIG_PAGE_POOL_STATS
>+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
>+{
>+	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
>+	struct page_pool *pool = c->rq.page_pool;
>+	struct page_pool_stats stats = { 0 };
>+
You can drop the 0; just {} should be enough.
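
For reference, both spellings zero-initialize the whole struct; a standalone
illustration (not from the patch):

    struct page_pool_stats a = { 0 };  /* first member set to 0, the rest implicitly zeroed */
    struct page_pool_stats b = { };    /* empty initializer list, every member zeroed */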

>+	if (!page_pool_get_stats(pool, &stats))
>+		return;
>+

You can contain the whole page_pool_stats object inside the rq_stats object
and avoid all the assignments below.

Just do:
    page_pool_get_stats(pool, &rq_stats.pp);
    return;
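
For illustration, the suggestion amounts to roughly the sketch below; the
embedded pp field and the helper shown here are hypothetical, not part of the
posted patch:

    #include <net/page_pool.h>

    struct mlx5e_rq_stats {
            /* ... existing per-ring counters ... */
    #ifdef CONFIG_PAGE_POOL_STATS
            struct page_pool_stats pp;  /* hypothetical embedded field */
    #endif
    };

    #ifdef CONFIG_PAGE_POOL_STATS
    static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
    {
            /* page_pool_get_stats() fills the caller-supplied struct,
             * so no per-field copies are needed.
             */
            page_pool_get_stats(c->rq.page_pool, &c->rq.stats->pp);
    }
    #endif

As the reply below explains, the catch is the ethtool string that would be
generated from the nested field name.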

>+	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
>+	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
>+	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
>+	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
>+	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
>+	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
>+
>+	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
>+	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
>+	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
>+	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
>+	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
>+}
>+#else
>+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
>+{
>+}
>+#endif
Joe Damato March 2, 2022, 1:50 a.m. UTC | #2
On Tue, Mar 1, 2022 at 5:02 PM Saeed Mahameed <saeed@kernel.org> wrote:
>
> On 01 Mar 14:10, Joe Damato wrote:
> >This change adds support for the page_pool_get_stats API to mlx5. If the
> >user has enabled CONFIG_PAGE_POOL_STATS in their kernel, ethtool will
> >output page pool stats.
> >
>
> I was hoping to see something other than ethtool, a driver-less approach:
> page_pool is a first-class citizen; it collects its own stats and should be
> able to report them without the need for driver help.
>
> I understand these stats are per driver ring, but we can always come up with
> a naming convention in the page pool that allows correlating page-pool stats
> with per-ring driver stats.
>
> Anyway, I can't think of a simple hack, so this patch is a good temporary
> compromise until we come up with the right approach.
>
> >Signed-off-by: Joe Damato <jdamato@fastly.com>
> >---
> > drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 75 ++++++++++++++++++++++
> > drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 27 +++++++-
> > 2 files changed, 101 insertions(+), 1 deletion(-)
> >
> >diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
> >index 3e5d8c7..eb518ec 100644
> >--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
> >+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
> >@@ -37,6 +37,10 @@
> > #include "en/ptp.h"
> > #include "en/port.h"
> >
> >+#ifdef CONFIG_PAGE_POOL_STATS
> >+#include <net/page_pool.h>
> >+#endif
> >+
> > static unsigned int stats_grps_num(struct mlx5e_priv *priv)
> > {
> >       return !priv->profile->stats_grps_num ? 0 :
> >@@ -183,6 +187,19 @@ static const struct counter_desc sw_stats_desc[] = {
> >       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
> >       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
> >       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
> >+#ifdef CONFIG_PAGE_POOL_STATS
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
> >+      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
> >+#endif
> > #ifdef CONFIG_MLX5_EN_TLS
> >       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
> >       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
> >@@ -349,6 +366,19 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
> >       s->rx_congst_umr              += rq_stats->congst_umr;
> >       s->rx_arfs_err                += rq_stats->arfs_err;
> >       s->rx_recover                 += rq_stats->recover;
> >+#ifdef CONFIG_PAGE_POOL_STATS
> >+      s->rx_pp_alloc_fast          += rq_stats->pp_alloc_fast;
> >+      s->rx_pp_alloc_slow          += rq_stats->pp_alloc_slow;
> >+      s->rx_pp_alloc_empty         += rq_stats->pp_alloc_empty;
> >+      s->rx_pp_alloc_refill        += rq_stats->pp_alloc_refill;
> >+      s->rx_pp_alloc_waive         += rq_stats->pp_alloc_waive;
> >+      s->rx_pp_alloc_slow_high_order          += rq_stats->pp_alloc_slow_high_order;
> >+      s->rx_pp_recycle_cached                 += rq_stats->pp_recycle_cached;
> >+      s->rx_pp_recycle_cache_full             += rq_stats->pp_recycle_cache_full;
> >+      s->rx_pp_recycle_ring                   += rq_stats->pp_recycle_ring;
> >+      s->rx_pp_recycle_ring_full              += rq_stats->pp_recycle_ring_full;
> >+      s->rx_pp_recycle_released_ref           += rq_stats->pp_recycle_released_ref;
> >+#endif
> > #ifdef CONFIG_MLX5_EN_TLS
> >       s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
> >       s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
> >@@ -455,6 +485,35 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
> >       }
> > }
> >
> >+#ifdef CONFIG_PAGE_POOL_STATS
> >+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
> >+{
> >+      struct mlx5e_rq_stats *rq_stats = c->rq.stats;
> >+      struct page_pool *pool = c->rq.page_pool;
> >+      struct page_pool_stats stats = { 0 };
> >+
> You can drop the 0; just {} should be enough.
>
> >+      if (!page_pool_get_stats(pool, &stats))
> >+              return;
> >+
>
> You can contain the whole page_pool_stats object inside the rq_stats object
> and avoid all the assignments below.
>
> Just do:
>     page_pool_get_stats(pool, &rq_stats.pp);
>     return;

I don't think I can because the maximum stat name size is 32 bytes
(ETH_GSTRING_LEN).

If I do what you are suggesting, I would need to do something like:

{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats,
pp.recycle_stats.released_refcnt) }

which will generate a string of the form:
"rx%d_pp.recycle_stats.released_refcnt" which is well over 32 bytes,
especially with double-digit queue numbers.
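
A quick standalone check of the length (userspace C, purely illustrative,
not part of the patch; ETH_GSTRING_LEN is 32 bytes and must also hold the
trailing NUL):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* The name the stringified nested field would produce, before
             * "%d" is even expanded to a queue index.
             */
            const char *name = "rx%d_pp.recycle_stats.released_refcnt";

            /* Prints 37, already past the 32-byte limit. */
            printf("%zu\n", strlen(name));
            return 0;
    }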

The only options I see are:
  - A new define that allows setting a custom field name
(MLX5E_DECLARE_RX_STAT_NAME_OVERRIDE ?), or
  - Leaving the code as-is

Can you let me know what you prefer for the v9?

Thanks,
Joe
Saeed Mahameed March 2, 2022, 4:36 a.m. UTC | #3
On 01 Mar 17:50, Joe Damato wrote:
>On Tue, Mar 1, 2022 at 5:02 PM Saeed Mahameed <saeed@kernel.org> wrote:
>>

[...]

>The only options I see are:
>  - A new define that allows setting a custom field name
>(MLX5E_DECLARE_RX_STAT_NAME_OVERRIDE ?), or
>  - Leaving the code as-is
>
>Can you let me know what you prefer for the v9?
>

ack, leave as is.

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 3e5d8c7..eb518ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,6 +37,10 @@ 
 #include "en/ptp.h"
 #include "en/port.h"
 
+#ifdef CONFIG_PAGE_POOL_STATS
+#include <net/page_pool.h>
+#endif
+
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
 {
 	return !priv->profile->stats_grps_num ? 0 :
@@ -183,6 +187,19 @@  static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -349,6 +366,19 @@  static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 	s->rx_congst_umr              += rq_stats->congst_umr;
 	s->rx_arfs_err                += rq_stats->arfs_err;
 	s->rx_recover                 += rq_stats->recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+	s->rx_pp_alloc_fast          += rq_stats->pp_alloc_fast;
+	s->rx_pp_alloc_slow          += rq_stats->pp_alloc_slow;
+	s->rx_pp_alloc_empty         += rq_stats->pp_alloc_empty;
+	s->rx_pp_alloc_refill        += rq_stats->pp_alloc_refill;
+	s->rx_pp_alloc_waive         += rq_stats->pp_alloc_waive;
+	s->rx_pp_alloc_slow_high_order		+= rq_stats->pp_alloc_slow_high_order;
+	s->rx_pp_recycle_cached			+= rq_stats->pp_recycle_cached;
+	s->rx_pp_recycle_cache_full		+= rq_stats->pp_recycle_cache_full;
+	s->rx_pp_recycle_ring			+= rq_stats->pp_recycle_ring;
+	s->rx_pp_recycle_ring_full		+= rq_stats->pp_recycle_ring_full;
+	s->rx_pp_recycle_released_ref		+= rq_stats->pp_recycle_released_ref;
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
 	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
 	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
@@ -455,6 +485,35 @@  static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
 	}
 }
 
+#ifdef CONFIG_PAGE_POOL_STATS
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
+	struct page_pool *pool = c->rq.page_pool;
+	struct page_pool_stats stats = { 0 };
+
+	if (!page_pool_get_stats(pool, &stats))
+		return;
+
+	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
+	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
+	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
+	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
+	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
+
+	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
+	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
+	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
+	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
+	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+}
+#else
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+}
+#endif
+
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 {
 	struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -465,8 +524,11 @@  static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 	for (i = 0; i < priv->stats_nch; i++) {
 		struct mlx5e_channel_stats *channel_stats =
 			priv->channel_stats[i];
+
 		int j;
 
+		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
+
 		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
 		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
 		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
@@ -1887,6 +1949,19 @@  static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 14eaf92..a7a025d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -205,7 +205,19 @@  struct mlx5e_sw_stats {
 	u64 ch_aff_change;
 	u64 ch_force_irq;
 	u64 ch_eq_rearm;
-
+#ifdef CONFIG_PAGE_POOL_STATS
+	u64 rx_pp_alloc_fast;
+	u64 rx_pp_alloc_slow;
+	u64 rx_pp_alloc_slow_high_order;
+	u64 rx_pp_alloc_empty;
+	u64 rx_pp_alloc_refill;
+	u64 rx_pp_alloc_waive;
+	u64 rx_pp_recycle_cached;
+	u64 rx_pp_recycle_cache_full;
+	u64 rx_pp_recycle_ring;
+	u64 rx_pp_recycle_ring_full;
+	u64 rx_pp_recycle_released_ref;
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tx_tls_encrypted_packets;
 	u64 tx_tls_encrypted_bytes;
@@ -352,6 +364,19 @@  struct mlx5e_rq_stats {
 	u64 congst_umr;
 	u64 arfs_err;
 	u64 recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+	u64 pp_alloc_fast;
+	u64 pp_alloc_slow;
+	u64 pp_alloc_slow_high_order;
+	u64 pp_alloc_empty;
+	u64 pp_alloc_refill;
+	u64 pp_alloc_waive;
+	u64 pp_recycle_cached;
+	u64 pp_recycle_cache_full;
+	u64 pp_recycle_ring;
+	u64 pp_recycle_ring_full;
+	u64 pp_recycle_released_ref;
+#endif
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tls_decrypted_packets;
 	u64 tls_decrypted_bytes;