[v2,03/16] mm/page_alloc: Export free_frozen_pages() instead of free_unref_page()

Message ID 20220809171854.3725722-4-willy@infradead.org
State New
Series Allocate and free frozen pages

Commit Message

Matthew Wilcox Aug. 9, 2022, 5:18 p.m. UTC
This API makes more sense for slab to use and it works perfectly
well for swap.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
 mm/internal.h   |  4 ++--
 mm/page_alloc.c | 18 +++++++++---------
 mm/swap.c       |  2 +-
 3 files changed, 12 insertions(+), 12 deletions(-)

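For context on the naming: a page handed to free_frozen_pages() must be "frozen", i.e. its reference count is already zero, so the free path must not drop a reference. Below is a minimal sketch of the caller-side contract with the newly exported entry point; it assumes mm-internal code that includes mm/internal.h, and the helper name is hypothetical:

static void example_free_frozen(struct page *page, unsigned int order)
{
	/* Caller contract: the page is "frozen", i.e. refcount is 0. */
	VM_BUG_ON_PAGE(page_ref_count(page), page);

	/*
	 * free_frozen_pages() picks the path internally: the per-cpu
	 * lists for pcp_allowed_order() orders, __free_pages_ok()
	 * otherwise.  Callers no longer make that choice themselves.
	 */
	free_frozen_pages(page, order);
}
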
Comments

Miaohe Lin Aug. 10, 2022, 3 a.m. UTC | #1
On 2022/8/10 1:18, Matthew Wilcox (Oracle) wrote:
> This API makes more sense for slab to use and it works perfectly
> well for swap.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: David Hildenbrand <david@redhat.com>
> Reviewed-by: William Kucharski <william.kucharski@oracle.com>

Looks good to me. Thanks.

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

Muchun Song Aug. 10, 2022, 6:37 a.m. UTC | #2
> On Aug 10, 2022, at 01:18, Matthew Wilcox (Oracle) <willy@infradead.org> wrote:
> 
> This API makes more sense for slab to use and it works perfectly
> well for swap.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: David Hildenbrand <david@redhat.com>
> Reviewed-by: William Kucharski <william.kucharski@oracle.com>

Reviewed-by: Muchun Song <songmuchun@bytedance.com>

Thanks.

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 785409805ed7..08d0881223cf 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -362,8 +362,8 @@ extern void post_alloc_hook(struct page *page, unsigned int order,
 					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
-extern void free_unref_page(struct page *page, unsigned int order);
-extern void free_unref_page_list(struct list_head *list);
+void free_frozen_pages(struct page *, unsigned int order);
+void free_unref_page_list(struct list_head *list);
 
 extern void zone_pcp_update(struct zone *zone, int cpu_online);
 extern void zone_pcp_reset(struct zone *zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 04260b5a7699..30e7a5974d39 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -761,14 +761,6 @@ static inline bool pcp_allowed_order(unsigned int order)
 	return false;
 }
 
-static inline void free_frozen_pages(struct page *page, unsigned int order)
-{
-	if (pcp_allowed_order(order))		/* Via pcp? */
-		free_unref_page(page, order);
-	else
-		__free_pages_ok(page, order, FPI_NONE);
-}
-
 /*
  * Higher-order pages are called "compound pages".  They are structured thusly:
  *
@@ -3464,7 +3456,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
 /*
  * Free a pcp page
  */
-void free_unref_page(struct page *page, unsigned int order)
+static void free_unref_page(struct page *page, unsigned int order)
 {
 	unsigned long flags;
 	unsigned long __maybe_unused UP_flags;
@@ -3504,6 +3496,14 @@ void free_unref_page(struct page *page, unsigned int order)
 	pcp_trylock_finish(UP_flags);
 }
 
+void free_frozen_pages(struct page *page, unsigned int order)
+{
+	if (pcp_allowed_order(order))		/* Via pcp? */
+		free_unref_page(page, order);
+	else
+		__free_pages_ok(page, order, FPI_NONE);
+}
+
 /*
  * Free a list of 0-order pages
  */
diff --git a/mm/swap.c b/mm/swap.c
index 6525011b715e..647f6f77193f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -102,7 +102,7 @@ static void __folio_put_small(struct folio *folio)
 {
 	__page_cache_release(folio);
 	mem_cgroup_uncharge(folio);
-	free_unref_page(&folio->page, 0);
+	free_frozen_pages(&folio->page, 0);
 }
 
 static void __folio_put_large(struct folio *folio)
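A closing note on the mm/swap.c hunk: for an order-0 page, pcp_allowed_order(0) is always true (0 <= PAGE_ALLOC_COSTLY_ORDER), so the new call site is behaviorally identical to the old one; the patch only moves the pcp-or-buddy dispatch out of the callers. An illustrative expansion, not from the patch itself:

	/* New call in __folio_put_small(): */
	free_frozen_pages(&folio->page, 0);

	/*
	 * Since pcp_allowed_order(0) is always true, this reduces to
	 * the pre-patch call, now a static function in mm/page_alloc.c:
	 */
	free_unref_page(&folio->page, 0);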