[v3,net-next,2/5] net: page_pool: add bulk support for ptr_ring

Message ID b8638a44f1aee8feb3a1f6b949653e2125eb0867.1604484917.git.lorenzo@kernel.org (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series xdp: introduce bulking for page_pool tx return path

Commit Message

Lorenzo Bianconi Nov. 4, 2020, 10:22 a.m. UTC
Introduce the capability to batch page_pool ptr_ring refills, since the
recycling path usually runs inside the driver NAPI tx completion loop.

Suggested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 include/net/page_pool.h | 26 ++++++++++++++++++++++++++
 net/core/page_pool.c    | 35 +++++++++++++++++++++++++++++++++++
 net/core/xdp.c          |  9 ++-------
 3 files changed, 63 insertions(+), 7 deletions(-)
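
As a usage sketch (not part of this patch): the new helper lets a driver
hand back a whole array of buffers with a single ptr_ring producer-lock
acquisition instead of calling page_pool_put_full_page() once per page.
All drv_* names and the descriptor layout below are hypothetical; only
page_pool_put_page_bulk() comes from this series.

#define DRV_TX_BULK_SIZE	16

static void drv_clean_tx_ring(struct drv_tx_ring *ring, int budget)
{
	void *bulk[DRV_TX_BULK_SIZE];
	int count = 0;

	while (budget-- > 0) {
		struct drv_tx_desc *desc = drv_next_completed_desc(ring);

		if (!desc)
			break;

		/* Stage the buffer virtual address; the helper resolves the
		 * page itself via virt_to_head_page().
		 */
		bulk[count++] = desc->buf_addr;

		if (count == DRV_TX_BULK_SIZE) {
			page_pool_put_page_bulk(ring->page_pool, bulk, count);
			count = 0;
		}
	}

	/* Flush the remainder with one more producer-lock acquisition */
	if (count)
		page_pool_put_page_bulk(ring->page_pool, bulk, count);
}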

Comments

Jesper Dangaard Brouer Nov. 4, 2020, 12:24 p.m. UTC | #1
On Wed,  4 Nov 2020 11:22:55 +0100 Lorenzo Bianconi <lorenzo@kernel.org> wrote:

> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index ef98372facf6..236c5ed3aa66 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
[...]
> @@ -408,6 +410,39 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
>  }
>  EXPORT_SYMBOL(page_pool_put_page);
>  
> +void page_pool_put_page_bulk(struct page_pool *pool, void **data,
> +			     int count)
> +{
> +	int i, len = 0;
> +
> +	for (i = 0; i < count; i++) {
> +		struct page *page = virt_to_head_page(data[i]);
> +
> +		if (likely(page_ref_count(page) == 1 &&
> +			   pool_page_reusable(pool, page))) {
> +			if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> +				page_pool_dma_sync_for_device(pool, page, -1);
> +
> +			/* bulk pages for ptr_ring cache */
> +			data[len++] = page;
> +		} else {
> +			page_pool_release_page(pool, page);
> +			put_page(page);
> +		}
> +	}
> +
> +	/* Grab the producer spinlock for concurrent access to
> +	 * ptr_ring page_pool cache
> +	 */
> +	page_pool_ring_lock(pool);
> +	for (i = 0; i < len; i++) {
> +		if (__ptr_ring_produce(&pool->ring, data[i]))
> +			page_pool_return_page(pool, data[i]);
> +	}
> +	page_pool_ring_unlock(pool);
> +}
> +EXPORT_SYMBOL(page_pool_put_page_bulk);

I don't like that you are replicating the core logic from
page_pool_put_page() in this function.  This means that we as
maintainers need to keep both of these places up-to-date.

Let me try to re-implement this, while sharing the refcnt logic:
(completely untested, not even compiled)

---
 net/core/page_pool.c |   58 +++++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 51 insertions(+), 9 deletions(-)

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ef98372facf6..c785e9825a0d 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -362,8 +362,9 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
  * If the page refcnt != 1, then the page will be returned to memory
  * subsystem.
  */
-void page_pool_put_page(struct page_pool *pool, struct page *page,
-			unsigned int dma_sync_size, bool allow_direct)
+static struct page*
+__page_pool_put_page(struct page_pool *pool, struct page *page,
+		     unsigned int dma_sync_size, bool allow_direct)
 {
 	/* This allocator is optimized for the XDP mode that uses
 	 * one-frame-per-page, but have fallbacks that act like the
@@ -381,13 +382,10 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 
 		if (allow_direct && in_serving_softirq())
 			if (page_pool_recycle_in_cache(page, pool))
-				return;
+				return NULL;
 
-		if (!page_pool_recycle_in_ring(pool, page)) {
-			/* Cache full, fallback to free pages */
-			page_pool_return_page(pool, page);
-		}
-		return;
+		/* Page found as candidate for recycling */
+		return page;
 	}
 	/* Fallback/non-XDP mode: API user have elevated refcnt.
 	 *
@@ -405,9 +403,53 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
+	return NULL;
+}
+
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+			unsigned int dma_sync_size, bool allow_direct)
+{
+	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
+
+	if (page && !page_pool_recycle_in_ring(pool, page)) {
+		/* Cache full, fallback to free pages */
+		page_pool_return_page(pool, page);
+	}
 }
 EXPORT_SYMBOL(page_pool_put_page);
 
+/* Caller must not use data area after call, as this function overwrites it */
+void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count)
+{
+	int i, len = 0, len2 = 0;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = virt_to_head_page(data[i]);
+
+		page = __page_pool_put_page(pool, page, -1 , false);
+
+		/* Approved for recycling for ptr_ring cache */
+		if (page)
+			data[len++] = page;
+	}
+
+	/* Bulk producer into ptr_ring page_pool cache */
+	page_pool_ring_lock(pool);
+	for (i = 0; i < len; i++) {
+		if (__ptr_ring_produce(&pool->ring, data[i]))
+			data[len2++] = data[i];
+	}
+	page_pool_ring_unlock(pool);
+
+	/* Unlikely case of ptr_ring cache full, free pages outside producer
+	 * lock, given put_page() with refcnt==1 can be an expensive operation.
+	 */
+	for (i = 0; i < len2; i++) {
+		page_pool_return_page(pool, data[i]);
+	}
+}
+EXPORT_SYMBOL(page_pool_put_page_bulk);
+
 static void page_pool_empty_ring(struct page_pool *pool)
 {
 	struct page *page;
Lorenzo Bianconi Nov. 4, 2020, 2:49 p.m. UTC | #2
> > diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> > index ef98372facf6..236c5ed3aa66 100644
> > --- a/net/core/page_pool.c
> > +++ b/net/core/page_pool.c
> [...]
> > @@ -408,6 +410,39 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
> >  }
> >  EXPORT_SYMBOL(page_pool_put_page);
> >  
> > +void page_pool_put_page_bulk(struct page_pool *pool, void **data,
> > +			     int count)
> > +{
> > +	int i, len = 0;
> > +
> > +	for (i = 0; i < count; i++) {
> > +		struct page *page = virt_to_head_page(data[i]);
> > +
> > +		if (likely(page_ref_count(page) == 1 &&
> > +			   pool_page_reusable(pool, page))) {
> > +			if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> > +				page_pool_dma_sync_for_device(pool, page, -1);
> > +
> > +			/* bulk pages for ptr_ring cache */
> > +			data[len++] = page;
> > +		} else {
> > +			page_pool_release_page(pool, page);
> > +			put_page(page);
> > +		}
> > +	}
> > +
> > +	/* Grab the producer spinlock for concurrent access to
> > +	 * ptr_ring page_pool cache
> > +	 */
> > +	page_pool_ring_lock(pool);
> > +	for (i = 0; i < len; i++) {
> > +		if (__ptr_ring_produce(&pool->ring, data[i]))
> > +			page_pool_return_page(pool, data[i]);
> > +	}
> > +	page_pool_ring_unlock(pool);
> > +}
> > +EXPORT_SYMBOL(page_pool_put_page_bulk);
> 
> I don't like that you are replicating the core logic from
> page_pool_put_page() in this function.  This means that we as
> maintainers need to keep both of these places up-to-date.
> 
> Let me try to re-implement this, while sharing the refcnt logic:
> (completely untested, not even compiled)

ack, I like the approach below; I will integrate it in v4

> 
> ---
>  net/core/page_pool.c |   58 +++++++++++++++++++++++++++++++++++++++++++-------
>  2 files changed, 51 insertions(+), 9 deletions(-)
> 
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index ef98372facf6..c785e9825a0d 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -362,8 +362,9 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
>   * If the page refcnt != 1, then the page will be returned to memory
>   * subsystem.

[...]

>  
> +/* Caller must not use data area after call, as this function overwrites it */
> +void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count)
> +{
> +	int i, len = 0, len2 = 0;
> +
> +	for (i = 0; i < count; i++) {
> +		struct page *page = virt_to_head_page(data[i]);
> +
> +		page = __page_pool_put_page(pool, page, -1 , false);
> +
> +		/* Approved for recycling for ptr_ring cache */
> +		if (page)
> +			data[len++] = page;
> +	}

I guess we should just return here if len is 0, so that we avoid grabbing
the ptr_ring lock, agree?
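
i.e. something like this on top of your (equally untested) sketch:

	/* Nothing approved for recycling, avoid taking the producer lock */
	if (unlikely(!len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	...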

Regards,
Lorenzo

> +
> +	/* Bulk producer into ptr_ring page_pool cache */
> +	page_pool_ring_lock(pool);
> +	for (i = 0; i < len; i++) {
> +		if (__ptr_ring_produce(&pool->ring, data[i]))
> +			data[len2++] = data[i];
> +	}
> +	page_pool_ring_unlock(pool);
> +
> +	/* Unlikely case of ptr_ring cache full, free pages outside producer
> +	 * lock, given put_page() with refcnt==1 can be an expensive operation.
> +	 */
> +	for (i = 0; i < len2; i++) {
> +		page_pool_return_page(pool, data[i]);
> +	}
> +}
> +EXPORT_SYMBOL(page_pool_put_page_bulk);
> +
>  static void page_pool_empty_ring(struct page_pool *pool)
>  {
>  	struct page *page;
> 
> 
> -- 
> Best regards,
>   Jesper Dangaard Brouer
>   MSc.CS, Principal Kernel Engineer at Red Hat
>   LinkedIn: http://www.linkedin.com/in/brouer
>

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 81d7773f96cd..b5b195305346 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -152,6 +152,8 @@  struct page_pool *page_pool_create(const struct page_pool_params *params);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
 void page_pool_release_page(struct page_pool *pool, struct page *page);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+			     int count);
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -165,6 +167,11 @@  static inline void page_pool_release_page(struct page_pool *pool,
 					  struct page *page)
 {
 }
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+					   int count)
+{
+}
 #endif
 
 void page_pool_put_page(struct page_pool *pool, struct page *page,
@@ -215,4 +222,23 @@  static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 	if (unlikely(pool->p.nid != new_nid))
 		page_pool_update_nid(pool, new_nid);
 }
+
+static inline void page_pool_ring_lock(struct page_pool *pool)
+	__acquires(&pool->ring.producer_lock)
+{
+	if (in_serving_softirq())
+		spin_lock(&pool->ring.producer_lock);
+	else
+		spin_lock_bh(&pool->ring.producer_lock);
+}
+
+static inline void page_pool_ring_unlock(struct page_pool *pool)
+	__releases(&pool->ring.producer_lock)
+{
+	if (in_serving_softirq())
+		spin_unlock(&pool->ring.producer_lock);
+	else
+		spin_unlock_bh(&pool->ring.producer_lock);
+}
+
 #endif /* _NET_PAGE_POOL_H */
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ef98372facf6..236c5ed3aa66 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,8 @@ 
 #include <linux/device.h>
 
 #include <net/page_pool.h>
+#include <net/xdp.h>
+
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
 #include <linux/page-flags.h>
@@ -408,6 +410,39 @@  void page_pool_put_page(struct page_pool *pool, struct page *page,
 }
 EXPORT_SYMBOL(page_pool_put_page);
 
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+			     int count)
+{
+	int i, len = 0;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = virt_to_head_page(data[i]);
+
+		if (likely(page_ref_count(page) == 1 &&
+			   pool_page_reusable(pool, page))) {
+			if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+				page_pool_dma_sync_for_device(pool, page, -1);
+
+			/* bulk pages for ptr_ring cache */
+			data[len++] = page;
+		} else {
+			page_pool_release_page(pool, page);
+			put_page(page);
+		}
+	}
+
+	/* Grab the producer spinlock for concurrent access to
+	 * ptr_ring page_pool cache
+	 */
+	page_pool_ring_lock(pool);
+	for (i = 0; i < len; i++) {
+		if (__ptr_ring_produce(&pool->ring, data[i]))
+			page_pool_return_page(pool, data[i]);
+	}
+	page_pool_ring_unlock(pool);
+}
+EXPORT_SYMBOL(page_pool_put_page_bulk);
+
 static void page_pool_empty_ring(struct page_pool *pool)
 {
 	struct page *page;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 66ac275a0360..ff7c801bd40c 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -393,16 +393,11 @@  EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
 {
 	struct xdp_mem_allocator *xa = bq->xa;
-	int i;
 
-	if (unlikely(!xa))
+	if (unlikely(!xa || !bq->count))
 		return;
 
-	for (i = 0; i < bq->count; i++) {
-		struct page *page = virt_to_head_page(bq->q[i]);
-
-		page_pool_put_full_page(xa->page_pool, page, false);
-	}
+	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
 	bq->count = 0;
 }
 EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
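
For reference, the driver-facing flow this enables looks roughly like the
sketch below. It assumes the xdp_frame_bulk helpers added earlier in this
series (xdp_frame_bulk_init() and xdp_return_frame_bulk()); the drv_* tx
completion helpers are hypothetical.

static void drv_xdp_tx_clean(struct drv_tx_ring *ring)
{
	struct xdp_frame_bulk bq;

	xdp_frame_bulk_init(&bq);

	while (drv_tx_desc_completed(ring)) {
		struct xdp_frame *xdpf = drv_tx_desc_to_frame(ring);

		/* Queue the frame; the bulk queue flushes itself when it
		 * fills up or the memory allocator changes.
		 */
		xdp_return_frame_bulk(xdpf, &bq);
	}

	/* Return whatever is left via one page_pool_put_page_bulk() call */
	xdp_flush_frame_bulk(&bq);
}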