diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4925,8 +4925,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
 {
 	struct page_pool_params pp_params = {
-		.flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
-				PP_FLAG_DMA_SYNC_DEV,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.order = hns3_page_order(ring),
 		.pool_size = ring->desc_num * hns3_buf_size(ring) /
 				(PAGE_SIZE << hns3_page_order(ring)),
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1426,7 +1426,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
 		return 0;
 	}
 
-	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+	pp_params.flags = PP_FLAG_DMA_MAP;
 	pp_params.pool_size = numptrs;
 	pp_params.nid = NUMA_NO_NODE;
 	pp_params.dev = pfvf->dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -853,7 +853,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		struct page_pool_params pp_params = { 0 };
 
 		pp_params.order = 0;
-		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size = pool_size;
 		pp_params.nid = node;
 		pp_params.dev = rq->pdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -566,7 +566,7 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	struct page_pool_params pp_params = {
 		.order = 0,
-		.flags = PP_FLAG_PAGE_FRAG,
+		.flags = 0,
 		.nid = NUMA_NO_NODE,
 		.dev = dev->dma_dev,
 	};
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -45,10 +45,8 @@
  * Please note DMA-sync-for-CPU is still
  * device driver responsibility
  */
-#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
 #define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
-				 PP_FLAG_DMA_SYNC_DEV |\
-				 PP_FLAG_PAGE_FRAG)
+				 PP_FLAG_DMA_SYNC_DEV)
 
 #define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
 		(sizeof(dma_addr_t) > sizeof(unsigned long))
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -177,10 +177,6 @@ static int page_pool_init(struct page_pool *pool,
 		 */
 	}
 
-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
-	    pool->p.flags & PP_FLAG_PAGE_FRAG)
-		return -EINVAL;
-
 #ifdef CONFIG_PAGE_POOL_STATS
 	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
 	if (!pool->recycle_stats)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5650,7 +5650,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 	/* In general, avoid mixing page_pool and non-page_pool allocated
 	 * pages within the same SKB. Additionally avoid dealing with clones
 	 * with page_pool pages, in case the SKB is using page_pool fragment
-	 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+	 * references (page_pool_alloc_frag()). Since we only take full page
 	 * references for cloned SKBs at the moment that would result in
 	 * inconsistent reference counts.
 	 * In theory we could take full references if @from is cloned and
PP_FLAG_PAGE_FRAG is not really needed after non-frag and frag page
support is unified, so remove it. An extra benefit is that drivers no
longer need to handle falling back to the page_pool_alloc_pages() API
on architectures where PAGE_POOL_DMA_USE_PP_FRAG_COUNT is true when
using the page_pool_alloc_frag() API.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
---
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c          | 3 +--
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c        | 2 +-
 drivers/net/wireless/mediatek/mt76/mac80211.c            | 2 +-
 include/net/page_pool.h                                  | 4 +---
 net/core/page_pool.c                                     | 4 ----
 net/core/skbuff.c                                        | 2 +-
 7 files changed, 6 insertions(+), 13 deletions(-)
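As a reviewer aid (not part of the diff), a minimal sketch of what
driver-side page_pool usage looks like after this change. The
example_*() helpers, the pool sizing, and the 2048-byte frag size are
made-up illustration values; page_pool_create(), page_pool_alloc_frag()
and struct page_pool_params are the existing API:

/* Illustrative only -- not from this patch. With PP_FLAG_PAGE_FRAG
 * gone, flags only describe DMA behaviour, and page_pool_alloc_frag()
 * can be called unconditionally, even on architectures where
 * PAGE_POOL_DMA_USE_PP_FRAG_COUNT is true.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

#define EXAMPLE_RX_FRAG_SIZE	2048	/* hypothetical per-buffer size */

static struct page_pool *example_create_pool(struct device *dev)
{
	struct page_pool_params pp_params = {
		/* Only the DMA-related flags remain after this patch. */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,		/* illustrative */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= 0,
		.max_len	= PAGE_SIZE,	/* sync length for DMA_SYNC_DEV */
	};

	/* Returns an ERR_PTR() on failure. */
	return page_pool_create(&pp_params);
}

static struct page *example_rx_alloc(struct page_pool *pool,
				     unsigned int *offset)
{
	/* No fallback to page_pool_alloc_pages() is needed any more;
	 * frag allocation now works on every architecture.
	 */
	return page_pool_alloc_frag(pool, offset, EXAMPLE_RX_FRAG_SIZE,
				    GFP_ATOMIC | __GFP_NOWARN);
}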