
[net,v2] net: mana: Switch to page pool for jumbo frames

Message ID 1742920357-27263-1-git-send-email-haiyangz@microsoft.com (mailing list archive)
State Accepted
Commit fa37a8849634db2dd3545116873da8cf4b1e67c6
Delegated to: Netdev Maintainers
Series [net,v2] net: mana: Switch to page pool for jumbo frames

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 13 of 13 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 87 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2025-03-26--00-00 (tests: 896)

Commit Message

Haiyang Zhang March 25, 2025, 4:32 p.m. UTC
Frag allocators, such as netdev_alloc_frag(), were not designed to
work for fragsz > PAGE_SIZE.

So, switch to page pool for jumbo frames instead of using page frag
allocators. This driver is using page pool for smaller MTUs already.

Cc: stable@vger.kernel.org
Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
---
v2: updated the commit msg as suggested by Jakub Kicinski.

---
 drivers/net/ethernet/microsoft/mana/mana_en.c | 46 ++++---------------
 1 file changed, 9 insertions(+), 37 deletions(-)
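
For context, here is a minimal sketch (the helper name is illustrative, not the driver's) of the workaround pattern this patch removes. A page frag allocator may fall back to a single page when its high-order refill fails, so a caller asking for fragsz > PAGE_SIZE had to verify the backing page order itself:

#include <linux/skbuff.h>
#include <linux/mm.h>

static void *frag_alloc_checked(unsigned int fragsz)
{
	struct page *page;
	void *va;

	va = netdev_alloc_frag(fragsz);
	if (!va)
		return NULL;

	page = virt_to_head_page(va);
	/* Frag cache fell back to order 0: cannot hold fragsz > PAGE_SIZE */
	if (compound_order(page) < get_order(fragsz)) {
		put_page(page);
		return NULL;
	}
	return va;
}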

Comments

Long Li March 25, 2025, 5:06 p.m. UTC | #1
> -----Original Message-----
> From: LKML haiyangz <lkmlhyz@microsoft.com> On Behalf Of Haiyang Zhang
> Sent: Tuesday, March 25, 2025 9:33 AM
> To: linux-hyperv@vger.kernel.org; netdev@vger.kernel.org
> Cc: Haiyang Zhang <haiyangz@microsoft.com>; Dexuan Cui
> <decui@microsoft.com>; stephen@networkplumber.org; KY Srinivasan
> <kys@microsoft.com>; Paul Rosswurm <paulros@microsoft.com>;
> olaf@aepfle.de; vkuznets <vkuznets@redhat.com>; davem@davemloft.net;
> wei.liu@kernel.org; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; leon@kernel.org; Long Li <longli@microsoft.com>;
> ssengar@linux.microsoft.com; linux-rdma@vger.kernel.org;
> daniel@iogearbox.net; john.fastabend@gmail.com; bpf@vger.kernel.org;
> ast@kernel.org; hawk@kernel.org; tglx@linutronix.de;
> shradhagupta@linux.microsoft.com; jesse.brandeburg@intel.com;
> andrew+netdev@lunn.ch; linux-kernel@vger.kernel.org; stable@vger.kernel.org
> Subject: [PATCH net,v2] net: mana: Switch to page pool for jumbo frames
> 
> Frag allocators, such as netdev_alloc_frag(), were not designed to work for
> fragsz > PAGE_SIZE.
> 
> So, switch to page pool for jumbo frames instead of using page frag allocators.
> This driver is using page pool for smaller MTUs already.
> 
> Cc: stable@vger.kernel.org
> Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
> Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
> ---
> v2: updated the commit msg as suggested by Jakub Kicinski.
> 
> ---
>  drivers/net/ethernet/microsoft/mana/mana_en.c | 46 ++++---------------
>  1 file changed, 9 insertions(+), 37 deletions(-)
> 
> diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c
> b/drivers/net/ethernet/microsoft/mana/mana_en.c
> index 9a8171f099b6..4d41f4cca3d8 100644
> --- a/drivers/net/ethernet/microsoft/mana/mana_en.c
> +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
> @@ -661,30 +661,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context
> *mpc, int new_mtu, int num_qu
>  	mpc->rxbpre_total = 0;
> 
>  	for (i = 0; i < num_rxb; i++) {
> -		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
> -			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
> -			if (!va)
> -				goto error;
> -
> -			page = virt_to_head_page(va);
> -			/* Check if the frag falls back to single page */
> -			if (compound_order(page) <
> -			    get_order(mpc->rxbpre_alloc_size)) {
> -				put_page(page);
> -				goto error;
> -			}
> -		} else {
> -			page = dev_alloc_page();
> -			if (!page)
> -				goto error;
> +		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
> +		if (!page)
> +			goto error;
> 
> -			va = page_to_virt(page);
> -		}
> +		va = page_to_virt(page);
> 
>  		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
>  				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
>  		if (dma_mapping_error(dev, da)) {
> -			put_page(virt_to_head_page(va));
> +			put_page(page);

Should we use __free_pages()?
Haiyang Zhang March 25, 2025, 6:11 p.m. UTC | #2
> -----Original Message-----
> From: Long Li <longli@microsoft.com>
> Sent: Tuesday, March 25, 2025 1:06 PM
> To: Haiyang Zhang <haiyangz@microsoft.com>; linux-hyperv@vger.kernel.org;
> netdev@vger.kernel.org
> Cc: Dexuan Cui <decui@microsoft.com>; stephen@networkplumber.org; KY
> Srinivasan <kys@microsoft.com>; Paul Rosswurm <paulros@microsoft.com>;
> olaf@aepfle.de; vkuznets <vkuznets@redhat.com>; davem@davemloft.net;
> wei.liu@kernel.org; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; leon@kernel.org; ssengar@linux.microsoft.com; linux-
> rdma@vger.kernel.org; daniel@iogearbox.net; john.fastabend@gmail.com;
> bpf@vger.kernel.org; ast@kernel.org; hawk@kernel.org; tglx@linutronix.de;
> shradhagupta@linux.microsoft.com; jesse.brandeburg@intel.com;
> andrew+netdev@lunn.ch; linux-kernel@vger.kernel.org;
> stable@vger.kernel.org
> Subject: RE: [PATCH net,v2] net: mana: Switch to page pool for jumbo
> frames
> 
> 
> 
> > [...]
> > Subject: [PATCH net,v2] net: mana: Switch to page pool for jumbo frames
> >
> > Frag allocators, such as netdev_alloc_frag(), were not designed to work
> for
> > fragsz > PAGE_SIZE.
> >
> > So, switch to page pool for jumbo frames instead of using page frag
> allocators.
> > This driver is using page pool for smaller MTUs already.
> >
> > Cc: stable@vger.kernel.org
> > Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
> > Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
> > ---
> > v2: updated the commit msg as suggested by Jakub Kicinski.
> >
> > [...]
> >
> >  		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
> >  				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
> >  		if (dma_mapping_error(dev, da)) {
> > -			put_page(virt_to_head_page(va));
> > +			put_page(page);
> 
> Should we use __free_pages()?

Quote from doc: https://www.kernel.org/doc/html/next/core-api/mm-api.html
__free_pages():
"This function can free multi-page allocations that are not compound pages."
"If you want to use the page's reference count to decide when to free the 
allocation, you should allocate a compound page, and use put_page() instead 
of __free_pages()."

And, since dev_alloc_pages() returns a compound page for a high-order
allocation, we use put_page(), which works for both compound and single pages.

Thanks,
- Haiyang
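
A sketch of the pairing Haiyang describes (the helper name is illustrative, not from the driver): dev_alloc_pages() returns a compound page when the order is non-zero, so put_page() on the head page releases the whole allocation, and the same call also frees an order-0 page.

#include <linux/skbuff.h>
#include <linux/mm.h>

static void *rxbuf_alloc(unsigned int alloc_size, struct page **ppage)
{
	/* Compound page when get_order(alloc_size) > 0 */
	struct page *page = dev_alloc_pages(get_order(alloc_size));

	if (!page)
		return NULL;

	*ppage = page;
	return page_to_virt(page);
}

/* On any error path after a successful rxbuf_alloc(), put_page(*ppage)
 * is sufficient: it is correct for compound and single pages alike.
 */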
Long Li March 25, 2025, 6:32 p.m. UTC | #3
> > > Subject: [PATCH net,v2] net: mana: Switch to page pool for jumbo
> > > frames
> > >
> > > Frag allocators, such as netdev_alloc_frag(), were not designed to
> > > work
> > for
> > > fragsz > PAGE_SIZE.
> > >
> > > So, switch to page pool for jumbo frames instead of using page frag
> > allocators.
> > > This driver is using page pool for smaller MTUs already.
> > >
> > > Cc: stable@vger.kernel.org
> > > Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
> > > Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>

Reviewed-by: Long Li <longli@microsoft.com>

> [...]
Shradha Gupta March 27, 2025, 5:01 a.m. UTC | #4
On Tue, Mar 25, 2025 at 09:32:37AM -0700, Haiyang Zhang wrote:
> Frag allocators, such as netdev_alloc_frag(), were not designed to
> work for fragsz > PAGE_SIZE.
> 
> So, switch to page pool for jumbo frames instead of using page frag
> allocators. This driver is using page pool for smaller MTUs already.
> 
> Cc: stable@vger.kernel.org
> Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
> Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
> ---
> v2: updated the commit msg as suggested by Jakub Kicinski.
> 
> ---
>  drivers/net/ethernet/microsoft/mana/mana_en.c | 46 ++++---------------
>  1 file changed, 9 insertions(+), 37 deletions(-)
> 
> [...]

Reviewed-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
patchwork-bot+netdevbpf@kernel.org March 28, 2025, 1:50 p.m. UTC | #5
Hello:

This patch was applied to netdev/net.git (main)
by Jakub Kicinski <kuba@kernel.org>:

On Tue, 25 Mar 2025 09:32:37 -0700 you wrote:
> Frag allocators, such as netdev_alloc_frag(), were not designed to
> work for fragsz > PAGE_SIZE.
> 
> So, switch to page pool for jumbo frames instead of using page frag
> allocators. This driver is using page pool for smaller MTUs already.
> 
> Cc: stable@vger.kernel.org
> Fixes: 80f6215b450e ("net: mana: Add support for jumbo frame")
> Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
> 
> [...]

Here is the summary with links:
  - [net,v2] net: mana: Switch to page pool for jumbo frames
    https://git.kernel.org/netdev/net/c/fa37a8849634

You are awesome, thank you!

Patch

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 9a8171f099b6..4d41f4cca3d8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -661,30 +661,16 @@  int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
 	mpc->rxbpre_total = 0;
 
 	for (i = 0; i < num_rxb; i++) {
-		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
-			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
-			if (!va)
-				goto error;
-
-			page = virt_to_head_page(va);
-			/* Check if the frag falls back to single page */
-			if (compound_order(page) <
-			    get_order(mpc->rxbpre_alloc_size)) {
-				put_page(page);
-				goto error;
-			}
-		} else {
-			page = dev_alloc_page();
-			if (!page)
-				goto error;
+		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+		if (!page)
+			goto error;
 
-			va = page_to_virt(page);
-		}
+		va = page_to_virt(page);
 
 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
 		if (dma_mapping_error(dev, da)) {
-			put_page(virt_to_head_page(va));
+			put_page(page);
 			goto error;
 		}
 
@@ -1672,7 +1658,7 @@  static void mana_rx_skb(void *buf_va, bool from_pool,
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-			     dma_addr_t *da, bool *from_pool, bool is_napi)
+			     dma_addr_t *da, bool *from_pool)
 {
 	struct page *page;
 	void *va;
@@ -1683,21 +1669,6 @@  static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 	if (rxq->xdp_save_va) {
 		va = rxq->xdp_save_va;
 		rxq->xdp_save_va = NULL;
-	} else if (rxq->alloc_size > PAGE_SIZE) {
-		if (is_napi)
-			va = napi_alloc_frag(rxq->alloc_size);
-		else
-			va = netdev_alloc_frag(rxq->alloc_size);
-
-		if (!va)
-			return NULL;
-
-		page = virt_to_head_page(va);
-		/* Check if the frag falls back to single page */
-		if (compound_order(page) < get_order(rxq->alloc_size)) {
-			put_page(page);
-			return NULL;
-		}
 	} else {
 		page = page_pool_dev_alloc_pages(rxq->page_pool);
 		if (!page)
@@ -1730,7 +1701,7 @@  static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
 	dma_addr_t da;
 	void *va;
 
-	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 	if (!va)
 		return;
 
@@ -2172,7 +2143,7 @@  static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
 	if (mpc->rxbufs_pre)
 		va = mana_get_rxbuf_pre(rxq, &da);
 	else
-		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 
 	if (!va)
 		return -ENOMEM;
@@ -2258,6 +2229,7 @@  static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
 	pprm.nid = gc->numa_node;
 	pprm.napi = &rxq->rx_cq.napi;
 	pprm.netdev = rxq->ndev;
+	pprm.order = get_order(rxq->alloc_size);
 
 	rxq->page_pool = page_pool_create(&pprm);
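
The pprm.order line above is the piece that lets the existing pool serve jumbo frames: the pool then hands out order-N compound pages, so a single pool allocation backs a buffer larger than PAGE_SIZE. A minimal sketch of such a pool follows; pool_size is an illustrative value, the header path assumes a kernel with the split page_pool headers, and DMA mapping (which this driver does manually via dma_map_single()) is omitted.

#include <net/page_pool/helpers.h>
#include <linux/mm.h>

static struct page_pool *create_rx_pool(unsigned int alloc_size)
{
	struct page_pool_params pprm = {
		.order		= get_order(alloc_size), /* > 0 for jumbo MTUs */
		.pool_size	= 256,			 /* illustrative */
		.nid		= NUMA_NO_NODE,
	};

	return page_pool_create(&pprm);	/* ERR_PTR() on failure */
}

/* Each page_pool_dev_alloc_pages(pool) call then yields one order-N page;
 * page_pool_put_full_page(pool, page, false) returns it for recycling.
 */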