[net-next,v7,1/4] page_pool: Add allocation stats

Message ID 1645810914-35485-2-git-send-email-jdamato@fastly.com
State Superseded
Delegated to: Netdev Maintainers
Series page_pool: Add stats counters

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success
netdev/cover_letter success Series has a cover letter
netdev/patch_count success
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5978 this patch: 5978
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 878 this patch: 878
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 6129 this patch: 6129
netdev/checkpatch warning CHECK: Macro argument '__stat' may be better as '(__stat)' to avoid precedence issues; CHECK: Macro argument 'pool' may be better as '(pool)' to avoid precedence issues
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
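
For reference, a sketch of the parenthesized macro form that checkpatch
suggests above (not what the patch as posted applies; note that __stat
names a struct member, so only the pool argument can usefully be wrapped):

	/* Sketch of the checkpatch-suggested spelling: (pool) guards
	 * against operator-precedence surprises if a non-trivial
	 * expression is ever passed as the pool argument.
	 */
	#define alloc_stat_inc(pool, __stat)	((pool)->alloc_stats.__stat++)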

Commit Message

Joe Damato Feb. 25, 2022, 5:41 p.m. UTC
Add per-pool statistics counters for the allocation path of a page pool.
These stats are incremented in softirq context, so no locking or per-cpu
variables are needed.

These statistics are disabled by default, and a kernel config option is
provided for users who wish to enable them.

The statistics added are:
	- fast: successful fast path allocations
	- slow: slow path order-0 allocations
	- slow_high_order: slow path high-order allocations
	- empty: the ptr ring was empty, so a slow path allocation was forced
	- refill: an allocation which triggered a refill of the cache
	- waive: pages obtained from the ptr ring that could not be added to
	  the cache due to a NUMA mismatch
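
As a rough illustration of how a driver might eventually consume these
counters (a hedged sketch: it assumes CONFIG_PAGE_POOL_STATS=y and a valid
pool pointer; dump_pp_alloc_stats is a hypothetical name, and this patch
itself adds no read API):

	#ifdef CONFIG_PAGE_POOL_STATS
	/* Hypothetical helper, not part of this patch: fold the per-pool
	 * allocation counters into a driver-private array, e.g. for
	 * ethtool -S. Field names match struct page_pool_alloc_stats.
	 */
	static void dump_pp_alloc_stats(const struct page_pool *pool, u64 *out)
	{
		out[0] += pool->alloc_stats.fast;
		out[1] += pool->alloc_stats.slow;
		out[2] += pool->alloc_stats.slow_high_order;
		out[3] += pool->alloc_stats.empty;
		out[4] += pool->alloc_stats.refill;
		out[5] += pool->alloc_stats.waive;
	}
	#endif /* CONFIG_PAGE_POOL_STATS */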

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 include/net/page_pool.h | 18 ++++++++++++++++++
 net/Kconfig             | 13 +++++++++++++
 net/core/page_pool.c    | 24 ++++++++++++++++++++----
 3 files changed, 51 insertions(+), 4 deletions(-)

Comments

Jesper Dangaard Brouer Feb. 28, 2022, 7:07 a.m. UTC | #1
On 25/02/2022 18.41, Joe Damato wrote:
> [...]
> @@ -96,6 +109,11 @@ struct page_pool {
>   	unsigned int frag_offset;
>   	struct page *frag_page;
>   	long frag_users;
> +
> +#ifdef CONFIG_PAGE_POOL_STATS
> +	/* these stats are incremented while in softirq context */
> +	struct page_pool_alloc_stats alloc_stats;
> +#endif
>   	u32 xdp_mem_id;

I like the cache-line placement.

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
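
For context on the cache-line point: alloc_stats lands beside the hot
frag_offset/frag_page/frag_users fields written from the allocation path,
ahead of xdp_mem_id, rather than with the cold init-time parameters. A
hypothetical way to eyeball the resulting offsets (assumes
CONFIG_PAGE_POOL_STATS=y; pahole can show the same layout):

	/* Hypothetical debug snippet, e.g. dropped into page_pool_init();
	 * exact offsets depend on kernel config and architecture.
	 */
	pr_info("frag_users @%zu alloc_stats @%zu xdp_mem_id @%zu\n",
		offsetof(struct page_pool, frag_users),
		offsetof(struct page_pool, alloc_stats),
		offsetof(struct page_pool, xdp_mem_id));
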
Ilias Apalodimas Feb. 28, 2022, 7:28 a.m. UTC | #2
On Mon, 28 Feb 2022 at 09:07, Jesper Dangaard Brouer <jbrouer@redhat.com> wrote:
>
>
> On 25/02/2022 18.41, Joe Damato wrote:
> > [...]
>
> I like the cache-line placement.
>
> Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
>

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 97c3c19..1f27e8a4 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -84,6 +84,19 @@  struct page_pool_params {
 	void *init_arg;
 };
 
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+	u64 fast; /* fast path allocations */
+	u64 slow; /* slow-path order 0 allocations */
+	u64 slow_high_order; /* slow-path high order allocations */
+	u64 empty; /* failed refills due to empty ptr ring, forcing
+		    * slow path allocation
+		    */
+	u64 refill; /* allocations via successful refill */
+	u64 waive;  /* failed refills due to numa zone mismatch */
+};
+#endif
+
 struct page_pool {
 	struct page_pool_params p;
 
@@ -96,6 +109,11 @@  struct page_pool {
 	unsigned int frag_offset;
 	struct page *frag_page;
 	long frag_users;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* these stats are incremented while in softirq context */
+	struct page_pool_alloc_stats alloc_stats;
+#endif
 	u32 xdp_mem_id;
 
 	/*
diff --git a/net/Kconfig b/net/Kconfig
index 8a1f9d0..6b78f69 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,6 +434,19 @@  config NET_DEVLINK
 config PAGE_POOL
 	bool
 
+config PAGE_POOL_STATS
+	default n
+	bool "Page pool stats"
+	depends on PAGE_POOL
+	help
+	  Enable page pool statistics to track page allocation and recycling
+	  in page pools. This option incurs additional CPU cost in allocation
+	  and recycle paths and additional memory cost to store the statistics.
+	  These statistics are only available if this option is enabled and if
+	  the driver using the page pool supports exporting this data.
+
+	  If unsure, say N.
+
 config FAILOVER
 	tristate "Generic failover module"
 	help
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index e25d359..0fa4b76 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -26,6 +26,13 @@ 
 
 #define BIAS_MAX	LONG_MAX
 
+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
+#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+#else
+#define alloc_stat_inc(pool, __stat)
+#endif
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params)
 {
@@ -117,8 +124,10 @@  static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 	int pref_nid; /* preferred NUMA node */
 
 	/* Quicker fallback, avoid locks when ring is empty */
-	if (__ptr_ring_empty(r))
+	if (__ptr_ring_empty(r)) {
+		alloc_stat_inc(pool, empty);
 		return NULL;
+	}
 
 	/* Softirq guarantee CPU and thus NUMA node is stable. This,
 	 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
@@ -145,14 +154,17 @@  static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 			 * This limit stress on page buddy alloactor.
 			 */
 			page_pool_return_page(pool, page);
+			alloc_stat_inc(pool, waive);
 			page = NULL;
 			break;
 		}
 	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, refill);
+	}
 
 	return page;
 }
@@ -166,6 +178,7 @@  static struct page *__page_pool_get_cached(struct page_pool *pool)
 	if (likely(pool->alloc.count)) {
 		/* Fast-path */
 		page = pool->alloc.cache[--pool->alloc.count];
+		alloc_stat_inc(pool, fast);
 	} else {
 		page = page_pool_refill_alloc_cache(pool);
 	}
@@ -239,6 +252,7 @@  static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 		return NULL;
 	}
 
+	alloc_stat_inc(pool, slow_high_order);
 	page_pool_set_pp_info(pool, page);
 
 	/* Track how many pages are held 'in-flight' */
@@ -293,10 +307,12 @@  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	}
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
-	else
+		alloc_stat_inc(pool, slow);
+	} else {
 		page = NULL;
+	}
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
 	return page;
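
Taken together, the hunks above hook the counters into the allocation flow
at these points (a summary in comment form; function names as in
net/core/page_pool.c):

	/*
	 * __page_pool_get_cached()
	 *   cache hit                     -> fast++
	 *   cache empty -> page_pool_refill_alloc_cache()
	 *     ptr ring empty              -> empty++  (slow path follows)
	 *     page on wrong NUMA node     -> waive++  (refill stops early)
	 *     refill produced a page      -> refill++
	 * __page_pool_alloc_pages_slow()
	 *   order-0 bulk alloc succeeded  -> slow++
	 * __page_pool_alloc_page_order()
	 *   high-order alloc succeeded    -> slow_high_order++
	 */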