@@ -52,6 +52,7 @@ struct pp_alloc_cache {
* @dev: device, for DMA pre-mapping purposes
* @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @napi: NAPI which is the sole consumer of pages, otherwise NULL
+ * @queue: struct netdev_rx_queue this page_pool is being created for.
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
@@ -64,6 +65,7 @@ struct page_pool_params {
int nid;
struct device *dev;
struct napi_struct *napi;
+ struct netdev_rx_queue *queue;
enum dma_data_direction dma_dir;
unsigned int max_len;
unsigned int offset;
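
The new queue member rides along in struct page_pool_params, so a driver opts in simply by filling it in at pool-creation time. Below is a minimal driver-side sketch, not part of the patch: pdev, netdev and rx_ring are placeholders for the driver's own objects, and only the .queue line is new relative to existing page_pool users.

	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,
		.nid		= NUMA_NO_NODE,
		.dev		= &pdev->dev,		/* DMA-mapping device */
		.napi		= &rx_ring->napi,	/* sole consumer, if any */
		.netdev		= netdev,
		.queue		= __netif_get_rx_queue(netdev, rx_ring->index), /* new member */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};
	struct page_pool *pool = page_pool_create(&pp_params);

	if (IS_ERR(pool))
		return PTR_ERR(pool);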
@@ -126,6 +128,13 @@ struct page_pool_stats {
};
#endif
+struct memory_provider_ops {
+ int (*init)(struct page_pool *pool);
+ void (*destroy)(struct page_pool *pool);
+ struct page *(*alloc_pages)(struct page_pool *pool, gfp_t gfp);
+ bool (*release_page)(struct page_pool *pool, struct page *page);
+};
+
struct page_pool {
struct page_pool_params_fast p;
@@ -176,6 +185,9 @@ struct page_pool {
*/
struct ptr_ring ring;
+ void *mp_priv;
+ const struct memory_provider_ops *mp_ops;
+
#ifdef CONFIG_PAGE_POOL_STATS
/* recycle stats are per-cpu to avoid locking */
struct page_pool_recycle_stats __percpu *recycle_stats;
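#endif

Taken together, the memory_provider_ops table and the new mp_priv/mp_ops fields define the whole provider contract visible in this patch: ->init() runs from page_pool_init() (a failure unwinds the pool), ->alloc_pages() stands in for __page_pool_alloc_pages_slow(), ->release_page() reports whether the core should still clear the pp metadata and put_page(), and ->destroy() runs from __page_pool_destroy(). The sketch below is a hypothetical pass-through provider written only to illustrate that contract; none of the dummy_* names exist in the patch, and a real provider would also take over the DMA mapping and pp-metadata setup that the replaced slow path normally performs.

	struct dummy_mp_state {			/* hypothetical per-pool state */
		unsigned long pages_served;	/* illustrative, not race-safe */
	};

	static int dummy_mp_init(struct page_pool *pool)
	{
		pool->mp_priv = kzalloc(sizeof(struct dummy_mp_state), GFP_KERNEL);
		return pool->mp_priv ? 0 : -ENOMEM;
	}

	static void dummy_mp_destroy(struct page_pool *pool)
	{
		kfree(pool->mp_priv);
	}

	static struct page *dummy_mp_alloc_pages(struct page_pool *pool, gfp_t gfp)
	{
		struct dummy_mp_state *state = pool->mp_priv;
		struct page *page;

		/* Pass through to the buddy allocator; a real provider would
		 * hand out its own (e.g. pre-mapped device) memory here and do
		 * the pp-metadata / DMA setup the slow path normally does. */
		page = alloc_pages(gfp, pool->p.order);
		if (page)
			state->pages_served++;
		return page;
	}

	static bool dummy_mp_release_page(struct page_pool *pool, struct page *page)
	{
		/* true: the core still clears the pp info and put_page()s the page.
		 * false: the provider has taken the page back itself. */
		return true;
	}

	static const struct memory_provider_ops dummy_mp_ops = {
		.init		= dummy_mp_init,
		.destroy	= dummy_mp_destroy,
		.alloc_pages	= dummy_mp_alloc_pages,
		.release_page	= dummy_mp_release_page,
	};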
@@ -25,6 +25,8 @@
#include "page_pool_priv.h"
+static DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)
@@ -177,6 +179,7 @@ static int page_pool_init(struct page_pool *pool,
int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
+ int err;
memcpy(&pool->p, &params->fast, sizeof(pool->p));
memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
@@ -248,10 +251,25 @@ static int page_pool_init(struct page_pool *pool,
/* Driver calling page_pool_create() also call page_pool_destroy() */
refcount_set(&pool->user_cnt, 1);
+ if (pool->mp_ops) {
+ err = pool->mp_ops->init(pool);
+ if (err) {
+ pr_warn("%s() mem-provider init failed %d\n", __func__,
+ err);
+ goto free_ptr_ring;
+ }
+
+ static_branch_inc(&page_pool_mem_providers);
+ }
+
if (pool->p.flags & PP_FLAG_DMA_MAP)
get_device(pool->p.dev);
return 0;
+
+free_ptr_ring:
+ ptr_ring_cleanup(&pool->ring, NULL);
+ return err;
}
static void page_pool_uninit(struct page_pool *pool)
@@ -546,7 +564,10 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
return page;
/* Slow-path: cache empty, do real allocation */
- page = __page_pool_alloc_pages_slow(pool, gfp);
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
+ page = pool->mp_ops->alloc_pages(pool, gfp);
+ else
+ page = __page_pool_alloc_pages_slow(pool, gfp);
return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
@@ -603,10 +624,13 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
void page_pool_return_page(struct page_pool *pool, struct page *page)
{
int count;
+ bool put;
- __page_pool_release_page_dma(pool, page);
-
- page_pool_clear_pp_info(page);
+ put = true;
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
+ put = pool->mp_ops->release_page(pool, page);
+ else
+ __page_pool_release_page_dma(pool, page);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
@@ -614,7 +638,10 @@ void page_pool_return_page(struct page_pool *pool, struct page *page)
count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
trace_page_pool_state_release(pool, page, count);
- put_page(page);
+ if (put) {
+ page_pool_clear_pp_info(page);
+ put_page(page);
+ }
/* An optimization would be to call __free_pages(page, pool->p.order)
* knowing page is not part of page-cache (thus avoiding a
* __page_cache_release() call).
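*/

The put flag is the ownership handshake: when ->release_page() returns false the core skips both page_pool_clear_pp_info() and put_page(), so the page belongs to the provider again; when it returns true the release finishes as before. A hypothetical provider-side counterpart, assuming it can reach __page_pool_release_page_dma() through page_pool_priv.h and keeps its state in pool->mp_priv, might stash returned pages for reuse like this (a matching ->alloc_pages() would pop from the same stash):

	struct stash_mp_state {			/* hypothetical provider state */
		spinlock_t lock;
		struct page *stash[64];
		unsigned int count;
	};

	static bool stash_mp_release_page(struct page_pool *pool, struct page *page)
	{
		struct stash_mp_state *state = pool->mp_priv;
		bool kept = false;

		spin_lock_bh(&state->lock);
		if (state->count < ARRAY_SIZE(state->stash)) {
			/* Keep the DMA mapping and pp metadata intact; the page
			 * will be handed straight back out by ->alloc_pages(). */
			state->stash[state->count++] = page;
			kept = true;
		}
		spin_unlock_bh(&state->lock);

		if (kept)
			return false;	/* we own it again; core must not touch it */

		/* Stash full: the core skips its own DMA unmap when mp_ops is
		 * set, so unmap here, then let it clear the pp info and drop
		 * the last reference. */
		__page_pool_release_page_dma(pool, page);
		return true;
	}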
@@ -889,6 +916,12 @@ static void __page_pool_destroy(struct page_pool *pool)
page_pool_unlist(pool);
page_pool_uninit(pool);
+
+ if (pool->mp_ops) {
+ pool->mp_ops->destroy(pool);
+ static_branch_dec(&page_pool_mem_providers);
+ }
+
kfree(pool);
}