@@ -397,6 +397,38 @@ static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
return false;
}
+static inline dma_addr_t __page_pool_dma_sync_va_for_device(const void *va,
+ u32 dma_sync_size,
+ bool compound)
+{
+ const struct page_pool *pool;
+ const struct page *page;
+ dma_addr_t addr;
+ u32 offset;
+
+ if (unlikely(compound)) {
+ page = virt_to_head_page(va);
+ offset = va - page_address(page);
+ } else {
+ page = virt_to_page(va);
+ offset = offset_in_page(va);
+ }
+
+ addr = page_pool_get_dma_addr(page) + offset;
+ pool = page->pp;
+
+ dma_sync_single_for_device(pool->p.dev, addr, dma_sync_size,
+ page_pool_get_dma_dir(pool));
+
+ return addr;
+}
+
+static inline dma_addr_t page_pool_dma_sync_va_for_device(const void *va,
+ u32 dma_sync_size)
+{
+ return __page_pool_dma_sync_va_for_device(va, dma_sync_size, false);
+}
+
/**
* page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
* @pool: &page_pool the @page belongs to
Drivers using Page Pool for Rx buffers do the same pattern on XDP_TX:
syncing-DMA-for-device and obtaining the DMA address for the &xdp_buff
they are sending. Add a helper for that, so drivers can do both in one
call.

I explicitly added a `bool compound` argument and set it to false by
default: only a few drivers, if any, use high-order pages with Page
Pool, so losing cycles on compound_head() looks suboptimal. Drivers can
always call the underscored version if needed (for example, pass
pool->p.order as the last argument -- this will always work).

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 include/net/page_pool/helpers.h | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)