
[v2,06/24] page_pool: Convert page_pool_return_page() to page_pool_return_netmem()

Message ID: 20230105214631.3939268-7-willy@infradead.org
State: New
Series: Split netmem from struct page

Commit Message

Matthew Wilcox Jan. 5, 2023, 9:46 p.m. UTC
Removes a call to compound_head(), saving 464 bytes of kernel text
as page_pool_return_page() is inlined seven times.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 net/core/page_pool.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
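
The conversion relies on the netmem helpers introduced earlier in this
series. As a rough sketch of the design (helper bodies assumed from the
series' cast-based approach, not quoted from it): struct netmem aliases
struct folio, so converting a page is a cast and dropping the reference
needs no head-page lookup.

static inline struct netmem *page_netmem(struct page *page)
{
	return (struct netmem *)page;
}

static inline struct folio *netmem_folio(struct netmem *nmem)
{
	return (struct folio *)nmem;
}

static inline void netmem_put(struct netmem *nmem)
{
	/* nmem already refers to the head page, so no compound_head() */
	folio_put(netmem_folio(nmem));
}

With page_pool_return_page() reduced to a one-line wrapper around
page_pool_return_netmem(), each of the seven inlined call sites loses
the compound_head() that put_page() would otherwise perform.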

Comments

Jesper Dangaard Brouer Jan. 6, 2023, 2:10 p.m. UTC | #1
On 05/01/2023 22.46, Matthew Wilcox (Oracle) wrote:
> Removes a call to compound_head(), saving 464 bytes of kernel text
> as page_pool_return_page() is inlined seven times.

Nice save for I-cache :-)

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   net/core/page_pool.c | 14 ++++++++++----
>   1 file changed, 10 insertions(+), 4 deletions(-)

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>

Ilias Apalodimas Jan. 10, 2023, 9:39 a.m. UTC | #2
On Thu, Jan 05, 2023 at 09:46:13PM +0000, Matthew Wilcox (Oracle) wrote:
> Removes a call to compound_head(), saving 464 bytes of kernel text
> as page_pool_return_page() is inlined seven times.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  net/core/page_pool.c | 14 ++++++++++----
>  1 file changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 4e985502c569..b606952773a6 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -220,7 +220,13 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
>  }
>  EXPORT_SYMBOL(page_pool_create);
>
> -static void page_pool_return_page(struct page_pool *pool, struct page *page);
> +static void page_pool_return_netmem(struct page_pool *pool, struct netmem *nm);
> +
> +static inline
> +void page_pool_return_page(struct page_pool *pool, struct page *page)
> +{
> +	page_pool_return_netmem(pool, page_netmem(page));
> +}
>
>  noinline
>  static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
> @@ -499,11 +505,11 @@ void page_pool_release_netmem(struct page_pool *pool, struct netmem *nmem)
>  EXPORT_SYMBOL(page_pool_release_netmem);
>
>  /* Return a page to the page allocator, cleaning up our state */
> -static void page_pool_return_page(struct page_pool *pool, struct page *page)
> +static void page_pool_return_netmem(struct page_pool *pool, struct netmem *nmem)
>  {
> -	page_pool_release_page(pool, page);
> +	page_pool_release_netmem(pool, nmem);
>
> -	put_page(page);
> +	netmem_put(nmem);
>  	/* An optimization would be to call __free_pages(page, pool->p.order)
>  	 * knowing page is not part of page-cache (thus avoiding a
>  	 * __page_cache_release() call).
> --
> 2.35.1
>

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>

Patch

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 4e985502c569..b606952773a6 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -220,7 +220,13 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
 }
 EXPORT_SYMBOL(page_pool_create);
 
-static void page_pool_return_page(struct page_pool *pool, struct page *page);
+static void page_pool_return_netmem(struct page_pool *pool, struct netmem *nm);
+
+static inline
+void page_pool_return_page(struct page_pool *pool, struct page *page)
+{
+	page_pool_return_netmem(pool, page_netmem(page));
+}
 
 noinline
 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
@@ -499,11 +505,11 @@ void page_pool_release_netmem(struct page_pool *pool, struct netmem *nmem)
 EXPORT_SYMBOL(page_pool_release_netmem);
 
 /* Return a page to the page allocator, cleaning up our state */
-static void page_pool_return_page(struct page_pool *pool, struct page *page)
+static void page_pool_return_netmem(struct page_pool *pool, struct netmem *nmem)
 {
-	page_pool_release_page(pool, page);
+	page_pool_release_netmem(pool, nmem);
 
-	put_page(page);
+	netmem_put(nmem);
 	/* An optimization would be to call __free_pages(page, pool->p.order)
 	 * knowing page is not part of page-cache (thus avoiding a
 	 * __page_cache_release() call).
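
For reference, the compound_head() call being saved lives in put_page(),
which must first resolve a possible tail page to its folio before
dropping the reference. A simplified sketch of the mainline helper of
that era (the real one also special-cases devmap pages):

static inline void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);	/* compound_head() lookup */

	folio_put(folio);
}

Since page_pool_return_netmem() starts from a struct netmem that already
refers to the head folio, netmem_put() can skip that lookup entirely.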