diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1009,7 +1009,8 @@ static void fec_enet_bd_init(struct net_device *dev)
struct page *page = txq->tx_buf[i].buf_p;
if (page)
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(page_pool_to_pp(page),
+ page, 0, false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -1549,7 +1550,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
xdp_return_frame_rx_napi(xdpf);
} else { /* recycle pages of XDP_TX frames */
/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(page->pp, page, 0, true);
+ page_pool_put_page(page_pool_to_pp(page), page, 0, true);
}
txq->tx_buf[index].buf_p = NULL;
@@ -3311,7 +3312,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
} else {
struct page *page = txq->tx_buf[i].buf_p;
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(page_pool_to_pp(page),
+ page, 0, false);
}
txq->tx_buf[i].buf_p = NULL;
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -210,8 +210,8 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
if (!page)
return;
- page_pool_put_page(page->pp, page, buf_state->page_info.buf_size,
- allow_direct);
+ page_pool_put_page(page_pool_to_pp(page), page,
+ buf_state->page_info.buf_size, allow_direct);
buf_state->page_info.page = NULL;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1050,7 +1050,8 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ struct page_pool *pool = page_pool_to_pp(rx_buffer->page);
+ u32 hr = pool->p.offset;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->offset + hr, size, rx_buffer->truesize);
@@ -1067,7 +1068,8 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ struct page_pool *pool = page_pool_to_pp(rx_buffer->page);
+ u32 hr = pool->p.offset;
struct sk_buff *skb;
void *va;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -385,7 +385,8 @@ static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
if (unlikely(!rx_buf->page))
return;
- page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
+ page_pool_put_full_page(page_pool_to_pp(rx_buf->page), rx_buf->page,
+ false);
rx_buf->page = NULL;
rx_buf->offset = 0;
@@ -3097,7 +3098,8 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
- u32 hr = rx_buf->page->pp->p.offset;
+ struct page_pool *pool = page_pool_to_pp(rx_buf->page);
+ u32 hr = pool->p.offset;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
rx_buf->offset + hr, size, rx_buf->truesize);
@@ -3129,8 +3131,10 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
if (!libeth_rx_sync_for_cpu(buf, copy))
return 0;
- dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
- src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
+ dst = page_address(hdr->page) + hdr->offset +
+ page_pool_to_pp(hdr->page)->p.offset;
+ src = page_address(buf->page) + buf->offset +
+ page_pool_to_pp(buf->page)->p.offset;
memcpy(dst, src, LARGEST_ALIGN(copy));
buf->offset += copy;
@@ -3148,7 +3152,7 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
*/
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
- u32 hr = buf->page->pp->p.offset;
+ u32 hr = page_pool_to_pp(buf->page)->p.offset;
struct sk_buff *skb;
void *va;
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
*/
void libeth_rx_recycle_slow(struct page *page)
{
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(page_pool_to_pp(page), page);
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -716,7 +716,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
* as we know this is a page_pool page.
*/
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(page_pool_to_pp(page),
+ page);
} while (++n < num);
break;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -632,7 +632,8 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
if (!ns->page)
ret = -ENOMEM;
} else {
- page_pool_put_full_page(ns->page->pp, ns->page, false);
+ page_pool_put_full_page(page_pool_to_pp(ns->page), ns->page,
+ false);
ns->page = NULL;
}
rtnl_unlock();
@@ -827,7 +828,8 @@ void nsim_destroy(struct netdevsim *ns)
/* Put this intentionally late to exercise the orphaning path */
if (ns->page) {
- page_pool_put_full_page(ns->page->pp, ns->page, false);
+ page_pool_put_full_page(page_pool_to_pp(ns->page), ns->page,
+ false);
ns->page = NULL;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1688,7 +1688,7 @@ static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
struct page *page = virt_to_head_page(buf);
- page_pool_put_full_page(page->pp, page, allow_direct);
+ page_pool_put_full_page(page_pool_to_pp(page), page, allow_direct);
}
static inline void *
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
--- a/include/net/libeth/rx.h
+++ b/include/net/libeth/rx.h
@@ -137,7 +137,8 @@ static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
return false;
}
- page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len);
+ page_pool_dma_sync_for_cpu(page_pool_to_pp(page), page, fqe->offset,
+ len);
return true;
}
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -83,6 +83,11 @@ static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
}
#endif
+static inline struct page_pool *page_pool_to_pp(struct page *page)
+{
+ return page->pp;
+}
+
/**
* page_pool_dev_alloc_pages() - allocate a page.
* @pool: pool from which to allocate
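
The added helper is a thin wrapper around the page->pp dereference, so
callers resolve the owning pool through one accessor instead of poking the
field themselves. A minimal usage sketch, assuming a page that was allocated
from a page_pool (example_recycle() is illustrative and not part of this
patch):

static void example_recycle(struct page *page)
{
	/* Resolve the owning pool via the helper rather than page->pp. */
	struct page_pool *pool = page_pool_to_pp(page);

	/* Return the whole page to its pool, outside NAPI context. */
	page_pool_put_full_page(pool, page, false);
}
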
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1033,7 +1033,8 @@ bool napi_pp_put_page(netmem_ref netmem)
if (unlikely(!is_pp_netmem(netmem)))
return false;
- page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
+ page_pool_put_full_netmem(page_pool_to_pp(netmem_to_page(netmem)),
+ netmem, false);
return true;
}
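
The conversion above goes netmem -> page -> pool; for page-backed netmem this
must agree with the direct netmem lookup it replaces. A minimal sketch of
that invariant (check_pp_lookup() is illustrative, not part of this patch):

static void check_pp_lookup(netmem_ref netmem)
{
	struct page *page = netmem_to_page(netmem);

	/* Pool reached via the page must match the direct netmem lookup. */
	WARN_ON(page_pool_to_pp(page) != netmem_get_pp(netmem));
}
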
diff --git a/net/core/xdp.c b/net/core/xdp.c
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -384,7 +384,8 @@ void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
* as mem->type knows this a page_pool page
*/
- page_pool_put_full_page(page->pp, page, napi_direct);
+ page_pool_put_full_page(page_pool_to_pp(page), page,
+ napi_direct);
break;
case MEM_TYPE_PAGE_SHARED:
page_frag_free(data);
Introduce the page_pool_to_pp() API to avoid callers accessing page->pp
directly, and convert all in-tree users to the new helper.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 drivers/net/ethernet/freescale/fec_main.c          |  8 +++++---
 .../net/ethernet/google/gve/gve_buffer_mgmt_dqo.c  |  4 ++--
 drivers/net/ethernet/intel/iavf/iavf_txrx.c        |  6 ++++--
 drivers/net/ethernet/intel/idpf/idpf_txrx.c        | 14 +++++++++-----
 drivers/net/ethernet/intel/libeth/rx.c             |  2 +-
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c   |  3 ++-
 drivers/net/netdevsim/netdev.c                     |  6 ++++--
 drivers/net/wireless/mediatek/mt76/mt76.h          |  2 +-
 include/net/libeth/rx.h                            |  3 ++-
 include/net/page_pool/helpers.h                    |  5 +++++
 net/core/skbuff.c                                  |  3 ++-
 net/core/xdp.c                                     |  3 ++-
 12 files changed, 39 insertions(+), 20 deletions(-)
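
A sketch of why the accessor helps, under the assumption that page->pp is
eventually hidden behind the netmem infrastructure: every caller converted
above would stay unchanged, and only the helper body would switch to the
netmem-based lookup (hypothetical follow-up, not part of this patch):

static inline struct page_pool *page_pool_to_pp(struct page *page)
{
	/* Hypothetical follow-up: go through netmem instead of page->pp. */
	return netmem_get_pp(page_to_netmem(page));
}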