
[2/6] mm/page_alloc: Rename free_the_page() to free_frozen_pages()

Message ID 20220531150611.1303156-3-willy@infradead.org (mailing list archive)
State New
Series Allocate and free frozen pages

Commit Message

Matthew Wilcox May 31, 2022, 3:06 p.m. UTC
In preparation for making this function available outside page_alloc,
rename it to free_frozen_pages(), which fits better with the other
memory allocation/free functions.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_alloc.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

Comments

David Hildenbrand May 31, 2022, 5:02 p.m. UTC | #1
On 31.05.22 17:06, Matthew Wilcox (Oracle) wrote:
> In preparation for making this function available outside page_alloc,
> rename it to free_frozen_pages(), which fits better with the other
> memory allocation/free functions.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: David Hildenbrand <david@redhat.com>
Miaohe Lin June 1, 2022, 6:58 a.m. UTC | #2
On 2022/5/31 23:06, Matthew Wilcox (Oracle) wrote:
> In preparation for making this function available outside page_alloc,
> rename it to free_frozen_pages(), which fits better with the other
> memory allocation/free functions.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

Thanks!

BTW: when I saw the name "free_frozen_pages", I thought it meant we can now free the
temporarily frozen page (via page_ref_freeze). ;)

> ---
>  mm/page_alloc.c | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 68bb77900f67..6a8676cb69db 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -687,7 +687,7 @@ static inline bool pcp_allowed_order(unsigned int order)
>  	return false;
>  }
>  
> -static inline void free_the_page(struct page *page, unsigned int order)
> +static inline void free_frozen_pages(struct page *page, unsigned int order)
>  {
>  	if (pcp_allowed_order(order))		/* Via pcp? */
>  		free_unref_page(page, order);
> @@ -713,7 +713,7 @@ static inline void free_the_page(struct page *page, unsigned int order)
>  void free_compound_page(struct page *page)
>  {
>  	mem_cgroup_uncharge(page_folio(page));
> -	free_the_page(page, compound_order(page));
> +	free_frozen_pages(page, compound_order(page));
>  }
>  
>  static void prep_compound_head(struct page *page, unsigned int order)
> @@ -5507,10 +5507,10 @@ EXPORT_SYMBOL(get_zeroed_page);
>  void __free_pages(struct page *page, unsigned int order)
>  {
>  	if (put_page_testzero(page))
> -		free_the_page(page, order);
> +		free_frozen_pages(page, order);
>  	else if (!PageHead(page))
>  		while (order-- > 0)
> -			free_the_page(page + (1 << order), order);
> +			free_frozen_pages(page + (1 << order), order);
>  }
>  EXPORT_SYMBOL(__free_pages);
>  
> @@ -5561,7 +5561,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
>  	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
>  
>  	if (page_ref_sub_and_test(page, count))
> -		free_the_page(page, compound_order(page));
> +		free_frozen_pages(page, compound_order(page));
>  }
>  EXPORT_SYMBOL(__page_frag_cache_drain);
>  
> @@ -5602,7 +5602,7 @@ void *page_frag_alloc_align(struct page_frag_cache *nc,
>  			goto refill;
>  
>  		if (unlikely(nc->pfmemalloc)) {
> -			free_the_page(page, compound_order(page));
> +			free_frozen_pages(page, compound_order(page));
>  			goto refill;
>  		}
>  
> @@ -5634,7 +5634,7 @@ void page_frag_free(void *addr)
>  	struct page *page = virt_to_head_page(addr);
>  
>  	if (unlikely(put_page_testzero(page)))
> -		free_the_page(page, compound_order(page));
> +		free_frozen_pages(page, compound_order(page));
>  }
>  EXPORT_SYMBOL(page_frag_free);
>  
>
Matthew Wilcox June 1, 2022, 12:23 p.m. UTC | #3
On Wed, Jun 01, 2022 at 02:58:23PM +0800, Miaohe Lin wrote:
> On 2022/5/31 23:06, Matthew Wilcox (Oracle) wrote:
> > In preparation for making this function available outside page_alloc,
> > rename it to free_frozen_pages(), which fits better with the other
> > memory allocation/free functions.
> > 
> > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> 
> Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
> 
> Thanks!
> 
> BTW: when I saw the name "free_frozen_pages", I thought it meant we can now free the
> temporarily frozen page (via page_ref_freeze). ;)

Well, you can.  Before you'd have to do it by calling free_unref_page(),
but now you can do it by calling free_frozen_pages().
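
A minimal kernel-context sketch of what this reply describes, for illustration only:
freeing a page whose refcount has been frozen with page_ref_freeze().  The wrapper name
drop_if_last_ref() is hypothetical, and at this point in the series free_frozen_pages()
is still local to mm/page_alloc.c; the series is preparing to make it available outside
page_alloc.

/*
 * Illustrative sketch, not part of the patch.  page_ref_freeze(page, 1)
 * atomically drops the refcount from 1 to 0 and fails if anyone else
 * still holds a reference; once the page is frozen, put_page() and
 * __free_pages() must not be used, so the page is handed straight back
 * to the allocator.
 */
static void drop_if_last_ref(struct page *page, unsigned int order)
{
	if (!page_ref_freeze(page, 1))
		return;		/* someone else still holds a reference */

	/* Before this patch, the call here would be free_unref_page(page, order). */
	free_frozen_pages(page, order);
}
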
Miaohe Lin June 2, 2022, 7:45 a.m. UTC | #4
On 2022/6/1 20:23, Matthew Wilcox wrote:
> On Wed, Jun 01, 2022 at 02:58:23PM +0800, Miaohe Lin wrote:
>> On 2022/5/31 23:06, Matthew Wilcox (Oracle) wrote:
>>> In preparation for making this function available outside page_alloc,
>>> rename it to free_frozen_pages(), which fits better with the other
>>> memory allocation/free functions.
>>>
>>> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>>
>> Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
>>
>> Thanks!
>>
>> BTW: when I saw the name "free_frozen_pages", I thought it meant we can now free the
>> temporarily frozen page (via page_ref_freeze). ;)
> 
> Well, you can.  Before you'd have to do it by calling free_unref_page(),
> but now you can do it by calling free_frozen_pages().

Sorry, my mistake. Indeed we can already do this, e.g. shrink_page_list does it via
__remove_mapping + free_unref_page_list.

Thanks! :)

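A condensed sketch of the batching pattern mentioned above, for illustration only.  The
real shrink_page_list()/__remove_mapping() path in mm/vmscan.c also handles page-cache
removal, locking and statistics, all omitted here, and the helper name
free_frozen_batch() is hypothetical.

/*
 * Pages whose refcounts have been frozen to zero are collected on a list
 * and handed back to the allocator in one call, as mm/vmscan.c does via
 * free_unref_page_list().
 */
static void free_frozen_batch(struct list_head *pages)
{
	struct page *page, *next;
	LIST_HEAD(to_free);

	list_for_each_entry_safe(page, next, pages, lru) {
		/* Only the last reference holder can freeze the refcount. */
		if (!page_ref_freeze(page, 1))
			continue;
		list_move(&page->lru, &to_free);
	}

	/* The list-based counterpart of free_unref_page()/free_frozen_pages(). */
	free_unref_page_list(&to_free);
}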

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 68bb77900f67..6a8676cb69db 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -687,7 +687,7 @@  static inline bool pcp_allowed_order(unsigned int order)
 	return false;
 }
 
-static inline void free_the_page(struct page *page, unsigned int order)
+static inline void free_frozen_pages(struct page *page, unsigned int order)
 {
 	if (pcp_allowed_order(order))		/* Via pcp? */
 		free_unref_page(page, order);
@@ -713,7 +713,7 @@  static inline void free_the_page(struct page *page, unsigned int order)
 void free_compound_page(struct page *page)
 {
 	mem_cgroup_uncharge(page_folio(page));
-	free_the_page(page, compound_order(page));
+	free_frozen_pages(page, compound_order(page));
 }
 
 static void prep_compound_head(struct page *page, unsigned int order)
@@ -5507,10 +5507,10 @@  EXPORT_SYMBOL(get_zeroed_page);
 void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page))
-		free_the_page(page, order);
+		free_frozen_pages(page, order);
 	else if (!PageHead(page))
 		while (order-- > 0)
-			free_the_page(page + (1 << order), order);
+			free_frozen_pages(page + (1 << order), order);
 }
 EXPORT_SYMBOL(__free_pages);
 
@@ -5561,7 +5561,7 @@  void __page_frag_cache_drain(struct page *page, unsigned int count)
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
 	if (page_ref_sub_and_test(page, count))
-		free_the_page(page, compound_order(page));
+		free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
@@ -5602,7 +5602,7 @@  void *page_frag_alloc_align(struct page_frag_cache *nc,
 			goto refill;
 
 		if (unlikely(nc->pfmemalloc)) {
-			free_the_page(page, compound_order(page));
+			free_frozen_pages(page, compound_order(page));
 			goto refill;
 		}
 
@@ -5634,7 +5634,7 @@  void page_frag_free(void *addr)
 	struct page *page = virt_to_head_page(addr);
 
 	if (unlikely(put_page_testzero(page)))
-		free_the_page(page, compound_order(page));
+		free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(page_frag_free);