@@ -117,16 +117,28 @@ static inline void *netmem_to_virt(const struct netmem *nmem)
return page_to_virt(netmem_page(nmem));
}

+static inline void *netmem_address(const struct netmem *nmem)
+{
+ return page_address(netmem_page(nmem));
+}
+
static inline int netmem_ref_count(const struct netmem *nmem)
{
return page_ref_count(netmem_page(nmem));
}

+static inline void netmem_get(struct netmem *nmem)
+{
+ struct folio *folio = (struct folio *)nmem;
+
+ folio_get(folio);
+}
+
static inline void netmem_put(struct netmem *nmem)
{
struct folio *folio = (struct folio *)nmem;

- return folio_put(folio);
+ folio_put(folio);
}

static inline bool netmem_is_pfmemalloc(const struct netmem *nmem)
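For orientation only, not part of the patch: a rough sketch of how the new accessor and refcounting helpers pair up. The caller name example_peek_netmem() and the 64-byte memset are invented for illustration; only netmem_address(), netmem_get() and netmem_put() come from the hunk above, and they are thin wrappers around page_address(), folio_get() and folio_put().

/* Illustration only (not from the patch): touch a netmem's memory while
 * holding an extra reference.
 */
static void example_peek_netmem(struct netmem *nmem)
{
	void *va = netmem_address(nmem);

	netmem_get(nmem);		/* take an extra reference */
	memset(va, 0, 64);		/* safe to touch the memory here */
	netmem_put(nmem);		/* drop it; may free on the last ref */
}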
@@ -295,6 +307,11 @@ struct page_pool {
struct netmem *page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp);

+static inline struct netmem *page_pool_dev_alloc_netmem(struct page_pool *pool)
+{
+ return page_pool_alloc_netmem(pool, GFP_ATOMIC | __GFP_NOWARN);
+}
+
static inline
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
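For context, not part of the patch: page_pool_dev_alloc_netmem() is the netmem counterpart of page_pool_dev_alloc_pages(), hard-coding GFP_ATOMIC | __GFP_NOWARN for callers in atomic context. A hypothetical RX-refill caller might look like the sketch below; example_rx_refill() and the "slot" parameter are invented names.

/* Illustration only: refill one RX descriptor slot from the pool. */
static int example_rx_refill(struct page_pool *pool, struct netmem **slot)
{
	struct netmem *nmem = page_pool_dev_alloc_netmem(pool);

	if (!nmem)
		return -ENOMEM;	/* pool empty and atomic alloc failed */

	*slot = nmem;
	return 0;
}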
@@ -452,6 +469,12 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
page_pool_put_full_page(pool, page, true);
}

+static inline void page_pool_recycle_netmem(struct page_pool *pool,
+ struct netmem *nmem)
+{
+ page_pool_put_full_netmem(pool, nmem, true);
+}
+
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
(sizeof(dma_addr_t) > sizeof(unsigned long))
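Again for context rather than as part of the patch: page_pool_recycle_netmem() is the netmem analogue of page_pool_recycle_direct() above, passing allow_direct = true down to page_pool_put_full_netmem(). A hypothetical completion-path caller (names invented) could be:

/* Illustration only: return a fully-owned netmem to the pool from the
 * driver's NAPI poll context, where direct (lockless) recycling is legal.
 */
static void example_rx_clean_one(struct page_pool *pool, struct netmem *nmem)
{
	page_pool_recycle_netmem(pool, nmem);
}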