Message ID | 1645810914-35485-2-git-send-email-jdamato@fastly.com
---|---
State | Superseded
Delegated to | Netdev Maintainers
Series | page_pool: Add stats counters
On 25/02/2022 18.41, Joe Damato wrote:
> Add per-pool statistics counters for the allocation path of a page pool.
> These stats are incremented in softirq context, so no locking or per-cpu
> variables are needed.
>
> This code is disabled by default and a kernel config option is provided for
> users who wish to enable them.
>
> The statistics added are:
>      - fast: successful fast path allocations
>      - slow: slow path order-0 allocations
>      - slow_high_order: slow path high order allocations
>      - empty: ptr ring is empty, so a slow path allocation was forced.
>      - refill: an allocation which triggered a refill of the cache
>      - waive: pages obtained from the ptr ring that cannot be added to
>        the cache due to a NUMA mismatch.
>
> Signed-off-by: Joe Damato <jdamato@fastly.com>
> ---
>   include/net/page_pool.h | 18 ++++++++++++++++++
>   net/Kconfig             | 13 +++++++++++++
>   net/core/page_pool.c    | 24 ++++++++++++++++++++----
>   3 files changed, 51 insertions(+), 4 deletions(-)
>
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 97c3c19..1f27e8a4 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -84,6 +84,19 @@ struct page_pool_params {
>          void *init_arg;
>   };
>
> +#ifdef CONFIG_PAGE_POOL_STATS
> +struct page_pool_alloc_stats {
> +        u64 fast; /* fast path allocations */
> +        u64 slow; /* slow-path order 0 allocations */
> +        u64 slow_high_order; /* slow-path high order allocations */
> +        u64 empty; /* failed refills due to empty ptr ring, forcing
> +                    * slow path allocation
> +                    */
> +        u64 refill; /* allocations via successful refill */
> +        u64 waive; /* failed refills due to numa zone mismatch */
> +};
> +#endif
> +
>   struct page_pool {
>          struct page_pool_params p;
>
> @@ -96,6 +109,11 @@ struct page_pool {
>          unsigned int frag_offset;
>          struct page *frag_page;
>          long frag_users;
> +
> +#ifdef CONFIG_PAGE_POOL_STATS
> +        /* these stats are incremented while in softirq context */
> +        struct page_pool_alloc_stats alloc_stats;
> +#endif
>          u32 xdp_mem_id;

I like the cache-line placement.

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
On Mon, 28 Feb 2022 at 09:07, Jesper Dangaard Brouer <jbrouer@redhat.com> wrote:
>
> On 25/02/2022 18.41, Joe Damato wrote:
> > Add per-pool statistics counters for the allocation path of a page pool.
> > These stats are incremented in softirq context, so no locking or per-cpu
> > variables are needed.
> >
> > This code is disabled by default and a kernel config option is provided for
> > users who wish to enable them.

[...]

> I like the cache-line placement.
>
> Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
>

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
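The no-locking claim in the patch description relies on all of these counters being written from the pool's own softirq (NAPI) context, so plain u64 fields never see a concurrent writer. As a minimal sketch of what a consumer could look like, the hypothetical driver helper below copies the counters into a caller-owned snapshot; the name drv_snapshot_pp_alloc_stats is invented for illustration and is not part of this patch, only struct page_pool_alloc_stats and the pool's alloc_stats member come from it.

#include <net/page_pool.h>

#ifdef CONFIG_PAGE_POOL_STATS
/* Hypothetical driver-side helper, illustration only: copy a pool's
 * allocation counters into a caller-owned snapshot. Because the counters
 * are written only from the pool's softirq/NAPI context, reading them from
 * that same context (or after NAPI has been disabled for the queue) needs
 * no extra locking.
 */
static void drv_snapshot_pp_alloc_stats(const struct page_pool *pool,
                                        struct page_pool_alloc_stats *snap)
{
        *snap = pool->alloc_stats;
}
#endif /* CONFIG_PAGE_POOL_STATS */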
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 97c3c19..1f27e8a4 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -84,6 +84,19 @@ struct page_pool_params {
         void *init_arg;
 };
 
+#ifdef CONFIG_PAGE_POOL_STATS
+struct page_pool_alloc_stats {
+        u64 fast; /* fast path allocations */
+        u64 slow; /* slow-path order 0 allocations */
+        u64 slow_high_order; /* slow-path high order allocations */
+        u64 empty; /* failed refills due to empty ptr ring, forcing
+                    * slow path allocation
+                    */
+        u64 refill; /* allocations via successful refill */
+        u64 waive; /* failed refills due to numa zone mismatch */
+};
+#endif
+
 struct page_pool {
         struct page_pool_params p;
 
@@ -96,6 +109,11 @@ struct page_pool {
         unsigned int frag_offset;
         struct page *frag_page;
         long frag_users;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+        /* these stats are incremented while in softirq context */
+        struct page_pool_alloc_stats alloc_stats;
+#endif
         u32 xdp_mem_id;
 
         /*
diff --git a/net/Kconfig b/net/Kconfig
index 8a1f9d0..6b78f69 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,6 +434,19 @@ config NET_DEVLINK
 config PAGE_POOL
         bool
 
+config PAGE_POOL_STATS
+        default n
+        bool "Page pool stats"
+        depends on PAGE_POOL
+        help
+          Enable page pool statistics to track page allocation and recycling
+          in page pools. This option incurs additional CPU cost in allocation
+          and recycle paths and additional memory cost to store the statistics.
+          These statistics are only available if this option is enabled and if
+          the driver using the page pool supports exporting this data.
+
+          If unsure, say N.
+
 config FAILOVER
         tristate "Generic failover module"
         help
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index e25d359..0fa4b76 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -26,6 +26,13 @@
 
 #define BIAS_MAX        LONG_MAX
 
+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
+#define alloc_stat_inc(pool, __stat)        (pool->alloc_stats.__stat++)
+#else
+#define alloc_stat_inc(pool, __stat)
+#endif
+
 static int page_pool_init(struct page_pool *pool,
                           const struct page_pool_params *params)
 {
@@ -117,8 +124,10 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
         int pref_nid; /* preferred NUMA node */
 
         /* Quicker fallback, avoid locks when ring is empty */
-        if (__ptr_ring_empty(r))
+        if (__ptr_ring_empty(r)) {
+                alloc_stat_inc(pool, empty);
                 return NULL;
+        }
 
         /* Softirq guarantee CPU and thus NUMA node is stable. This,
          * assumes CPU refilling driver RX-ring will also run RX-NAPI.
@@ -145,14 +154,17 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
                          * This limit stress on page buddy alloactor.
                          */
                         page_pool_return_page(pool, page);
+                        alloc_stat_inc(pool, waive);
                         page = NULL;
                         break;
                 }
         } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
         /* Return last page */
-        if (likely(pool->alloc.count > 0))
+        if (likely(pool->alloc.count > 0)) {
                 page = pool->alloc.cache[--pool->alloc.count];
+                alloc_stat_inc(pool, refill);
+        }
 
         return page;
 }
@@ -166,6 +178,7 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
         if (likely(pool->alloc.count)) {
                 /* Fast-path */
                 page = pool->alloc.cache[--pool->alloc.count];
+                alloc_stat_inc(pool, fast);
         } else {
                 page = page_pool_refill_alloc_cache(pool);
         }
@@ -239,6 +252,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
                 return NULL;
         }
 
+        alloc_stat_inc(pool, slow_high_order);
         page_pool_set_pp_info(pool, page);
 
         /* Track how many pages are held 'in-flight' */
@@ -293,10 +307,12 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
         }
 
         /* Return last page */
-        if (likely(pool->alloc.count > 0))
-                page = pool->alloc.cache[--pool->alloc.count];
-        else
+        if (likely(pool->alloc.count > 0)) {
+                page = pool->alloc.cache[--pool->alloc.count];
+                alloc_stat_inc(pool, slow);
+        } else {
                 page = NULL;
+        }
 
         /* When page just alloc'ed is should/must have refcnt 1. */
         return page;
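As a rough illustration of the cost model behind the default-off Kconfig option, the sketch below is a self-contained userspace mock-up of the conditional counter pattern used above. Only the shape of the alloc_stat_inc() macro and the fast/empty counter names mirror the patch; struct mock_pool, mock_alloc() and main() are invented for the example.

/* Userspace mock-up of the alloc_stat_inc() pattern (illustration only).
 * Build with counters:    cc -DCONFIG_PAGE_POOL_STATS demo.c -o demo
 * Build without counters: cc demo.c -o demo
 * When the define is absent the macro expands to nothing and the stats
 * struct disappears, so the instrumented call sites cost nothing.
 */
#include <inttypes.h>
#include <stdio.h>

struct mock_pool {
#ifdef CONFIG_PAGE_POOL_STATS
        struct {
                uint64_t fast;  /* cache hits */
                uint64_t empty; /* cache empty, slow path forced */
        } alloc_stats;
#endif
        int alloc_count;        /* pages left in the mock cache */
};

#ifdef CONFIG_PAGE_POOL_STATS
#define alloc_stat_inc(pool, __stat)        ((pool)->alloc_stats.__stat++)
#else
/* no-op when stats are disabled */
#define alloc_stat_inc(pool, __stat)
#endif

static void *mock_alloc(struct mock_pool *pool)
{
        static int dummy_page;

        if (pool->alloc_count > 0) {
                pool->alloc_count--;
                alloc_stat_inc(pool, fast);     /* fast-path hit */
                return &dummy_page;
        }
        alloc_stat_inc(pool, empty);            /* cache was empty */
        return NULL;
}

int main(void)
{
        struct mock_pool pool = { .alloc_count = 1 };

        mock_alloc(&pool);      /* consumes the one cached page */
        mock_alloc(&pool);      /* finds the cache empty */
#ifdef CONFIG_PAGE_POOL_STATS
        printf("fast=%" PRIu64 " empty=%" PRIu64 "\n",
               pool.alloc_stats.fast, pool.alloc_stats.empty);
#endif
        return 0;
}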
Add per-pool statistics counters for the allocation path of a page pool.
These stats are incremented in softirq context, so no locking or per-cpu
variables are needed.

This code is disabled by default and a kernel config option is provided for
users who wish to enable them.

The statistics added are:
     - fast: successful fast path allocations
     - slow: slow path order-0 allocations
     - slow_high_order: slow path high order allocations
     - empty: ptr ring is empty, so a slow path allocation was forced.
     - refill: an allocation which triggered a refill of the cache
     - waive: pages obtained from the ptr ring that cannot be added to
       the cache due to a NUMA mismatch.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 include/net/page_pool.h | 18 ++++++++++++++++++
 net/Kconfig             | 13 +++++++++++++
 net/core/page_pool.c    | 24 ++++++++++++++++++++----
 3 files changed, 51 insertions(+), 4 deletions(-)
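For anyone who wants to try the counters once this lands, a plausible config fragment is shown below; it is illustrative, not taken from the patch. PAGE_POOL itself has no prompt and is normally selected by a network driver that uses page pools, so in practice only the new option, which defaults to off, is set by hand.

# Illustrative .config fragment (not part of this patch):
CONFIG_PAGE_POOL=y
CONFIG_PAGE_POOL_STATS=y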