--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -18,7 +18,7 @@
*
* API keeps track of in-flight pages, in order to let API users know
* when it is safe to deallocate the page_pool object. Thus, API users
- * must make sure to call page_pool_release_page() when a page is
+ * must make sure to call page_pool_release_netmem() when a page is
* "leaving" the page_pool. Or call page_pool_put_page() where
* appropriate, to maintain correct accounting.
*
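As a caller-level illustration of the accounting contract above, consider the
minimal sketch below. mydrv_rx() is hypothetical and not part of this patch;
passing -1 as dma_sync_size follows the existing convention of "sync the
whole buffer":

static void mydrv_rx(struct page_pool *pool, struct page *page,
		     bool recycle)
{
	if (recycle) {
		/* Page stays under pool control; return it for reuse. */
		page_pool_put_page(pool, page, -1, true);
	} else {
		/* Page is "leaving" the pool (e.g. handed to the stack):
		 * unmap it and drop it from the in-flight count first.
		 */
		page_pool_release_netmem(pool, page_netmem(page));
	}
}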
@@ -332,7 +332,7 @@ struct xdp_mem_info;
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
+void page_pool_release_netmem(struct page_pool *pool, struct netmem *nmem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count);
#else
@@ -345,8 +345,8 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
struct xdp_mem_info *mem)
{
}
-static inline void page_pool_release_page(struct page_pool *pool,
- struct page *page)
+static inline void page_pool_release_netmem(struct page_pool *pool,
+ struct netmem *nmem)
{
}
@@ -356,6 +356,12 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
}
#endif
+static inline void page_pool_release_page(struct page_pool *pool,
+ struct page *page)
+{
+ page_pool_release_netmem(pool, page_netmem(page));
+}
+
void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size,
bool allow_direct);
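The new wrapper sits outside the #ifdef, so it forwards to whichever
page_pool_release_netmem() definition is in scope (real or stub). It relies
on page_netmem(), which earlier patches in this series are assumed to define
as an overlay conversion, roughly:

/* Sketch of the assumed helper: struct netmem is laid out on top of
 * struct page, so converting between the two is a cast.
 */
static inline struct netmem *page_netmem(struct page *page)
{
	return (struct netmem *)page;
}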
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -42,26 +42,26 @@ TRACE_EVENT(page_pool_release,
TRACE_EVENT(page_pool_state_release,
TP_PROTO(const struct page_pool *pool,
- const struct page *page, u32 release),
+ const struct netmem *nmem, u32 release),
- TP_ARGS(pool, page, release),
+ TP_ARGS(pool, nmem, release),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
- __field(const struct page *, page)
+ __field(const struct netmem *, nmem)
__field(u32, release)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
- __entry->page = page;
+ __entry->nmem = nmem;
__entry->release = release;
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = netmem_pfn(nmem);
),
- TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
- __entry->pool, __entry->page, __entry->pfn, __entry->release)
+ TP_printk("page_pool=%p nmem=%p pfn=0x%lx release=%u",
+ __entry->pool, __entry->nmem, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
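netmem_pfn(), used in the tracepoint above, is the netmem analogue of
page_to_pfn(). A plausible definition under the same overlay assumption:

static inline unsigned long netmem_pfn(const struct netmem *nmem)
{
	/* The cast is safe under the overlay assumption; const is
	 * dropped only to satisfy page_to_pfn().
	 */
	return page_to_pfn((struct page *)nmem);
}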
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -336,10 +336,10 @@ static void page_pool_set_pp_info(struct page_pool *pool,
pool->p.init_callback(page, pool->p.init_arg);
}
-static void page_pool_clear_pp_info(struct page *page)
+static void page_pool_clear_pp_info(struct netmem *nmem)
{
- page->pp_magic = 0;
- page->pp = NULL;
+ nmem->pp_magic = 0;
+ nmem->pp = NULL;
}
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
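Clearing pp_magic in page_pool_clear_pp_info() matters because recycling
paths recognise page_pool memory by signature; once the field is zeroed the
page can safely re-enter the normal page allocator. The recycle-side check
looks roughly like this (shown for netmem under the same overlay assumption;
the low bits are masked to avoid false positives from tagged pointers):

static inline bool netmem_is_pp(const struct netmem *nmem)
{
	return (nmem->pp_magic & ~0x3UL) == PP_SIGNATURE;
}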
@@ -467,7 +467,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
* a regular page (that will eventually be returned to the normal
* page-allocator via put_page).
*/
-void page_pool_release_page(struct page_pool *pool, struct page *page)
+void page_pool_release_netmem(struct page_pool *pool, struct netmem *nmem)
{
dma_addr_t dma;
int count;
@@ -478,23 +478,23 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
*/
goto skip_dma_unmap;
- dma = page_pool_get_dma_addr(page);
+ dma = netmem_get_dma_addr(nmem);
/* When page is unmapped, it cannot be returned to our pool */
dma_unmap_page_attrs(pool->p.dev, dma,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC);
- page_pool_set_dma_addr(page, 0);
+ netmem_set_dma_addr(nmem, 0);
skip_dma_unmap:
- page_pool_clear_pp_info(page);
+ page_pool_clear_pp_info(nmem);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
*/
count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
- trace_page_pool_state_release(pool, page, count);
+ trace_page_pool_state_release(pool, nmem, count);
}
-EXPORT_SYMBOL(page_pool_release_page);
+EXPORT_SYMBOL(page_pool_release_netmem);
/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
Also convert page_pool_clear_pp_info() and trace_page_pool_state_release()
to take a netmem.  Include a wrapper for page_pool_release_page() to avoid
converting all callers.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/net/page_pool.h          | 14 ++++++++++----
 include/trace/events/page_pool.h | 14 +++++++-------
 net/core/page_pool.c             | 18 +++++++++---------
 3 files changed, 26 insertions(+), 20 deletions(-)
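Usage-wise, existing callers need no source changes; a hypothetical driver
path like the one below compiles as before and is now routed through
page_pool_release_netmem() by the static inline wrapper:

static void mydrv_unmap_rx_buf(struct page_pool *pool, struct page *page)
{
	/* Unchanged caller: the wrapper converts the page to a netmem
	 * and forwards to page_pool_release_netmem().
	 */
	page_pool_release_page(pool, page);
}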