Add a stat, 'slow', for the slow allocation path. A static inline
accessor function is exposed for accessing this stat.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 include/net/page_pool.h | 10 ++++++++++
 net/core/page_pool.c    |  6 ++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,7 @@ struct pp_alloc_cache {
 struct page_pool_stats {
 	struct {
 		u64 fast; /* fast path allocations */
+		u64 slow; /* slow-path order-0 allocations */
 	} alloc;
 };
 
@@ -201,6 +202,10 @@ static inline u64 page_pool_stats_get_fast(struct page_pool *pool)
 	return pool->ps.alloc.fast;
 }
 
+static inline u64 page_pool_stats_get_slow(struct page_pool *pool)
+{
+	return pool->ps.alloc.slow;
+}
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -225,6 +230,11 @@ static inline u64 page_pool_stats_get_fast(struct page_pool *pool)
 {
 	return 0;
 }
+
+static inline u64 page_pool_stats_get_slow(struct page_pool *pool)
+{
+	return 0;
+}
 #endif
 
 void page_pool_put_page(struct page_pool *pool, struct page *page,
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -294,10 +294,12 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	}
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
-	else
+		pool->ps.alloc.slow++;
+	} else {
 		page = NULL;
+	}
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
 	return page;
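As a usage sketch only (not part of this patch): a driver holding a
struct page_pool could report both counters through its own debug or
stats output. The helper function and log message below are
hypothetical; only the two accessors come from this series.

#include <linux/printk.h>
#include <net/page_pool.h>

/* Hypothetical helper: log this pool's allocation counters.
 * Assumes a pool created with page_pool_create() and the
 * accessors added by this patch.
 */
static void example_log_pp_alloc_stats(struct page_pool *pool)
{
	u64 fast = page_pool_stats_get_fast(pool);
	u64 slow = page_pool_stats_get_slow(pool);

	pr_info("page_pool alloc stats: fast=%llu slow=%llu\n", fast, slow);
}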