[net-next,v5,5/5] mvneta: recycle buffers

Message ID 20210513165846.23722-6-mcroce@linux.microsoft.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series page_pool: recycle buffers

Checks

Context Check Description
netdev/apply fail Patch does not apply to net-next
netdev/tree_selection success Clearly marked for net-next

Commit Message

Matteo Croce May 13, 2021, 4:58 p.m. UTC
From: Matteo Croce <mcroce@microsoft.com>

Use the new recycling API for page_pool.
In a drop rate test, the packet rate increased by 10%,
from 269 Kpps to 296 Kpps.

perf top on a stock system shows:

Overhead  Shared Object     Symbol
  21.78%  [kernel]          [k] __pi___inval_dcache_area
  21.66%  [mvneta]          [k] mvneta_rx_swbm
   7.00%  [kernel]          [k] kmem_cache_alloc
   6.05%  [kernel]          [k] eth_type_trans
   4.44%  [kernel]          [k] kmem_cache_free.part.0
   3.80%  [kernel]          [k] __netif_receive_skb_core
   3.68%  [kernel]          [k] dev_gro_receive
   3.65%  [kernel]          [k] get_page_from_freelist
   3.43%  [kernel]          [k] page_pool_release_page
   3.35%  [kernel]          [k] free_unref_page

And this is the same output with recycling enabled:

Overhead  Shared Object     Symbol
  24.10%  [kernel]          [k] __pi___inval_dcache_area
  23.02%  [mvneta]          [k] mvneta_rx_swbm
   7.19%  [kernel]          [k] kmem_cache_alloc
   6.50%  [kernel]          [k] eth_type_trans
   4.93%  [kernel]          [k] __netif_receive_skb_core
   4.77%  [kernel]          [k] kmem_cache_free.part.0
   3.93%  [kernel]          [k] dev_gro_receive
   3.03%  [kernel]          [k] build_skb
   2.91%  [kernel]          [k] page_pool_put_page
   2.85%  [kernel]          [k] __xdp_return

The test was done with mausezahn on the TX side with 64-byte raw
Ethernet frames.

Signed-off-by: Matteo Croce <mcroce@microsoft.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
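
For readers following along: the whole change is to stop calling
page_pool_release_page() -- which unmaps the page and gives it up to the
page allocator -- and instead mark the skb so its pages return to the pool
when the skb is freed. A minimal sketch of the resulting receive-path
pattern, condensed from the diff below and using the helper names proposed
by the earlier patches in this series (v5; not necessarily the final
mainline API):

/* Sketch only: condensed from the mvneta change below. Assumes the
 * skb_mark_for_recycle()/page_pool_store_mem_info() helpers added by
 * earlier patches in this series.
 */
static struct sk_buff *
build_recyclable_skb(struct page_pool *pool, struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	struct sk_buff *skb;
	int i;

	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Head page: mark the skb for recycling and record the owning
	 * pool, instead of releasing the page from the pool entirely.
	 */
	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	for (i = 0; i < sinfo->nr_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(frag), skb_frag_off(frag),
				skb_frag_size(frag), PAGE_SIZE);
		/* pp_recycle is already set on the skb; each fragment
		 * page only needs its owning pool recorded.
		 */
		page_pool_store_mem_info(skb_frag_page(frag), pool);
	}

	return skb;
}

This also explains the perf deltas above: get_page_from_freelist() and
free_unref_page() disappear from the recycling profile because pages loop
between the pool and the driver instead of round-tripping through the
buddy allocator, with page_pool_put_page()/__xdp_return showing up on the
return path instead.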

Comments

Russell King (Oracle) May 13, 2021, 6:25 p.m. UTC | #1
On Thu, May 13, 2021 at 06:58:46PM +0200, Matteo Croce wrote:
> From: Matteo Croce <mcroce@microsoft.com>
> 
> Use the new recycling API for page_pool.
> In a drop rate test, the packet rate increased di 10%,

Typo - "by" ?

> from 269 Kpps to 296 Kpps.
> 
> perf top on a stock system shows:
> 
> Overhead  Shared Object     Symbol
>   21.78%  [kernel]          [k] __pi___inval_dcache_area
>   21.66%  [mvneta]          [k] mvneta_rx_swbm
>    7.00%  [kernel]          [k] kmem_cache_alloc
>    6.05%  [kernel]          [k] eth_type_trans
>    4.44%  [kernel]          [k] kmem_cache_free.part.0
>    3.80%  [kernel]          [k] __netif_receive_skb_core
>    3.68%  [kernel]          [k] dev_gro_receive
>    3.65%  [kernel]          [k] get_page_from_freelist
>    3.43%  [kernel]          [k] page_pool_release_page
>    3.35%  [kernel]          [k] free_unref_page
> 
> And this is the same output with recycling enabled:
> 
> Overhead  Shared Object     Symbol
>   24.10%  [kernel]          [k] __pi___inval_dcache_area
>   23.02%  [mvneta]          [k] mvneta_rx_swbm
>    7.19%  [kernel]          [k] kmem_cache_alloc
>    6.50%  [kernel]          [k] eth_type_trans
>    4.93%  [kernel]          [k] __netif_receive_skb_core
>    4.77%  [kernel]          [k] kmem_cache_free.part.0
>    3.93%  [kernel]          [k] dev_gro_receive
>    3.03%  [kernel]          [k] build_skb
>    2.91%  [kernel]          [k] page_pool_put_page
>    2.85%  [kernel]          [k] __xdp_return
> 
> The test was done with mausezahn on the TX side with 64 byte raw
> ethernet frames.
> 
> Signed-off-by: Matteo Croce <mcroce@microsoft.com>

Other than the typo, I have no objection to the patch.

Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

> ---
>  drivers/net/ethernet/marvell/mvneta.c | 11 +++++++----
>  1 file changed, 7 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
> index 7d5cd9bc6c99..6d2f8dce4900 100644
> --- a/drivers/net/ethernet/marvell/mvneta.c
> +++ b/drivers/net/ethernet/marvell/mvneta.c
> @@ -2320,7 +2320,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
>  }
>  
>  static struct sk_buff *
> -mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> +mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
>  		      struct xdp_buff *xdp, u32 desc_status)
>  {
>  	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
> @@ -2331,7 +2331,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  	if (!skb)
>  		return ERR_PTR(-ENOMEM);
>  
> -	page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
> +	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
>  
>  	skb_reserve(skb, xdp->data - xdp->data_hard_start);
>  	skb_put(skb, xdp->data_end - xdp->data);
> @@ -2343,7 +2343,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
>  				skb_frag_page(frag), skb_frag_off(frag),
>  				skb_frag_size(frag), PAGE_SIZE);
> -		page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
> +		/* We don't need to reset pp_recycle here. It's already set, so
> +		 * just mark fragments for recycling.
> +		 */
> +		page_pool_store_mem_info(skb_frag_page(frag), pool);
>  	}
>  
>  	return skb;
> @@ -2425,7 +2428,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
>  		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
>  			goto next;
>  
> -		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
> +		skb = mvneta_swbm_build_skb(pp, pp, &xdp_buf, desc_status);
>  		if (IS_ERR(skb)) {
>  			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
>  
> -- 
> 2.31.1
> 
>

Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7d5cd9bc6c99..6d2f8dce4900 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2320,7 +2320,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 }
 
 static struct sk_buff *
-mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 		      struct xdp_buff *xdp, u32 desc_status)
 {
 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -2331,7 +2331,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
+	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	skb_put(skb, xdp->data_end - xdp->data);
@@ -2343,7 +2343,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(frag), skb_frag_off(frag),
 				skb_frag_size(frag), PAGE_SIZE);
-		page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
+		/* We don't need to reset pp_recycle here. It's already set, so
+		 * just mark fragments for recycling.
+		 */
+		page_pool_store_mem_info(skb_frag_page(frag), pool);
 	}
 
 	return skb;
@@ -2425,7 +2428,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
 			goto next;
 
-		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
+		skb = mvneta_swbm_build_skb(pp, pp, &xdp_buf, desc_status);
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
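
For orientation: the two helpers used above come from earlier patches in
this series and are not shown here. Judging only from how they are called
in this patch, and from the pp_recycle comment in the fragment loop, they
plausibly reduce to something like the following sketch -- the field names
are inferred assumptions, not verbatim code from the series:

/* Assumed shape of the helpers from patches 1-4 of this series; the
 * fields (page->pp, skb->pp_recycle) are inferred from their usage in
 * this patch, not copied from the actual series.
 */
static inline void page_pool_store_mem_info(struct page *page,
					    struct page_pool *pp)
{
	/* Remember which pool owns the page, so the skb free path can
	 * hand it back via page_pool_put_page().
	 */
	page->pp = pp;
}

static inline void skb_mark_for_recycle(struct sk_buff *skb,
					struct page *page,
					struct page_pool *pp)
{
	skb->pp_recycle = 1;	/* enable recycling on skb free */
	page_pool_store_mem_info(page, pp);
}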