@@ -88,6 +88,7 @@ struct page_pool_stats {
* slow path allocation
*/
u64 refill; /* allocations via successful refill */
+ u64 waive; /* failed refills due to numa zone mismatch */
} alloc;
};
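
For orientation, here is a rough sketch of the stats layout after this hunk is applied. Only the two counters visible in this diff are shown; the real structure carries additional alloc counters that are omitted here:

struct page_pool_stats {
	struct {
		/* other alloc counters omitted in this sketch */
		u64 refill; /* allocations via successful refill */
		u64 waive;  /* failed refills due to numa zone mismatch */
	} alloc;
};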
@@ -226,6 +227,11 @@ static inline u64 page_pool_stats_get_refill(struct page_pool *pool)
{
return pool->ps.alloc.refill;
}
+
+static inline u64 page_pool_stats_get_waive(struct page_pool *pool)
+{
+ return pool->ps.alloc.waive;
+}
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
@@ -270,6 +276,11 @@ static inline u64 page_pool_stats_get_refill(struct page_pool *pool)
{
return 0;
}
+
+static inline u64 page_pool_stats_get_waive(struct page_pool *pool)
+{
+ return 0;
+}
#endif
void page_pool_put_page(struct page_pool *pool, struct page *page,
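
A driver could report the new counter next to the existing refill stat, for instance from an ethtool statistics callback. This is only an illustrative sketch: the mydrv_* name and the data[] layout are hypothetical, and only page_pool_stats_get_refill() / page_pool_stats_get_waive() come from this header.

/* Hypothetical caller; mydrv_* and the data[] layout are illustrative only. */
static void mydrv_fill_page_pool_stats(struct page_pool *pool, u64 *data)
{
	data[0] = page_pool_stats_get_refill(pool); /* successful ring refills */
	data[1] = page_pool_stats_get_waive(pool);  /* refills waived on NUMA mismatch */
}

Because the stubs under #else return 0, such a caller needs no config guard of its own.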
@@ -147,6 +147,7 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
* This limit stress on page buddy alloactor.
*/
page_pool_return_page(pool, page);
+ pool->ps.alloc.waive++;
page = NULL;
break;
}
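
For context, a heavily simplified paraphrase of the refill path this hunk touches (not the literal kernel code: locking, the empty-ring check, and preferred-node selection are omitted). A page consumed from the ptr ring is cached only if its NUMA node matches the preferred node; otherwise it is returned to the page allocator and the new counter is incremented.

/* Simplified paraphrase of page_pool_refill_alloc_cache(); illustrative only. */
static struct page *refill_sketch(struct page_pool *pool, int pref_nid)
{
	struct page *page;

	do {
		page = __ptr_ring_consume(&pool->ring);
		if (!page)
			break;

		if (page_to_nid(page) == pref_nid) {
			/* NUMA match: keep the page in the alloc cache */
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch: give the page back and record the waived refill */
			page_pool_return_page(pool, page);
			pool->ps.alloc.waive++;
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	return page;
}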
Track how often pages obtained from the ring cannot be added to the
cache because of a NUMA mismatch. A static inline wrapper is added
for accessing this stat.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 include/net/page_pool.h | 11 +++++++++++
 net/core/page_pool.c    |  1 +
 2 files changed, 12 insertions(+)