
[net-next,2/6] net: page_pool: Add a stat for the slow alloc path

Message ID: 1643237300-44904-3-git-send-email-jdamato@fastly.com (mailing list archive)
State: Changes Requested
Delegated to: Netdev Maintainers
Series: net: page_pool: Add page_pool stat counters

Checks

Context                          Check    Description
netdev/tree_selection            success  Clearly marked for net-next
netdev/fixes_present             success  Fixes tag not required for -next series
netdev/subject_prefix            success  Link
netdev/cover_letter              success  Series has a cover letter
netdev/patch_count               success  Link
netdev/header_inline             success  No static functions without inline keyword in header files
netdev/build_32bit               success  Errors and warnings before: 5991 this patch: 5991
netdev/cc_maintainers            success  CCed 5 of 5 maintainers
netdev/build_clang               success  Errors and warnings before: 882 this patch: 882
netdev/module_param              success  Was 0 now: 0
netdev/verify_signedoff          success  Signed-off-by tag matches author and committer
netdev/verify_fixes              success  No Fixes tag
netdev/build_allmodconfig_warn   success  Errors and warnings before: 6142 this patch: 6142
netdev/checkpatch                success  total: 0 errors, 0 warnings, 0 checks, 42 lines checked
netdev/kdoc                      success  Errors and warnings before: 2 this patch: 2
netdev/source_inline             success  Was 0 now: 0

Commit Message

Joe Damato Jan. 26, 2022, 10:48 p.m. UTC
Add a stat, 'slow', for the slow allocation path. A static inline accessor
function is exposed for reading this stat.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 include/net/page_pool.h | 10 ++++++++++
 net/core/page_pool.c    |  6 ++++--
 2 files changed, 14 insertions(+), 2 deletions(-)
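
As a hedged illustration (not part of the patch), here is how a driver-side
debug helper might read the new counter through the accessors this series
adds; the helper name my_pp_debug_stats() and the netdev_dbg() reporting are
assumptions for the example:

#include <linux/netdevice.h>
#include <net/page_pool.h>

/* Hypothetical helper: dumps the fast/slow allocation counters for a pool. */
static void my_pp_debug_stats(struct net_device *dev, struct page_pool *pool)
{
	u64 fast = page_pool_stats_get_fast(pool);	/* added earlier in the series */
	u64 slow = page_pool_stats_get_slow(pool);	/* added by this patch */

	netdev_dbg(dev, "page_pool alloc: fast=%llu slow=%llu\n", fast, slow);
}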

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 3ae3dc4..b5691ee 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,7 @@  struct pp_alloc_cache {
 struct page_pool_stats {
 	struct {
 		u64 fast; /* fast path allocations */
+		u64 slow; /* slow-path order-0 allocations */
 	} alloc;
 };
 
@@ -201,6 +202,10 @@  static inline u64 page_pool_stats_get_fast(struct page_pool *pool)
 	return pool->ps.alloc.fast;
 }
 
+static inline u64 page_pool_stats_get_slow(struct page_pool *pool)
+{
+	return pool->ps.alloc.slow;
+}
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -225,6 +230,11 @@  static inline u64 page_pool_stats_get_fast(struct page_pool *pool)
 {
 	return 0;
 }
+
+static inline u64 page_pool_stats_get_slow(struct page_pool *pool)
+{
+	return 0;
+}
 #endif
 
 void page_pool_put_page(struct page_pool *pool, struct page *page,
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 84c9566..9dbe721 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -294,10 +294,12 @@  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	}
 
 	/* Return last page */
-	if (likely(pool->alloc.count > 0))
+	if (likely(pool->alloc.count > 0)) {
 		page = pool->alloc.cache[--pool->alloc.count];
-	else
+		pool->ps.alloc.slow++;
+	} else {
 		page = NULL;
+	}
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
 	return page;
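
Worth noting: ps.alloc.slow is only incremented when the slow path actually
returns a page (the NULL branch leaves it untouched), so fast + slow
approximates the number of successful allocations. As a hedged sketch (the
helper name pp_fast_hit_pct() is hypothetical, not part of the series), a
fast-path hit percentage could be derived like this:

#include <linux/math64.h>
#include <net/page_pool.h>

/* Hypothetical helper: percentage of allocations served from the fast path. */
static inline u64 pp_fast_hit_pct(struct page_pool *pool)
{
	u64 fast = page_pool_stats_get_fast(pool);
	u64 total = fast + page_pool_stats_get_slow(pool);

	return total ? div64_u64(fast * 100, total) : 0;
}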