mm/page_alloc: invert logic for early page initialisation checks

Message ID: 20230104191805.2535864-1-rppt@kernel.org
State: New
Series: mm/page_alloc: invert logic for early page initialisation checks

Commit Message

Mike Rapoport Jan. 4, 2023, 7:18 p.m. UTC
From: "Mike Rapoport (IBM)" <rppt@kernel.org>

Rename early_page_uninitialised() to early_page_initialised() and invert
its logic to make the code more readable.

Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
---
 mm/page_alloc.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
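
The readability gain shows up at the call sites, where the old name forced a
double negative. Both snippets below are taken verbatim from the
init_reserved_page() hunk of this patch:

    /* before: double negative at the call site */
    if (!early_page_uninitialised(pfn))
            return;

    /* after: the positive predicate reads directly */
    if (early_page_initialised(pfn))
            return;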

Comments

Anshuman Khandual Jan. 5, 2023, 8:44 a.m. UTC | #1
On 1/5/23 00:48, Mike Rapoport wrote:
> From: "Mike Rapoport (IBM)" <rppt@kernel.org>
> 
> Rename early_page_uninitialised() to early_page_initialised() and invert
> its logic to make the code more readable.
> 
> Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
> ---

LGTM

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>

David Hildenbrand Jan. 5, 2023, 12:59 p.m. UTC | #2
On 04.01.23 20:18, Mike Rapoport wrote:
> From: "Mike Rapoport (IBM)" <rppt@kernel.org>
> 
> Rename early_page_uninitialised() to early_page_initialised() and invert
> its logic to make the code more readable.
> 
> Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>

Reviewed-by: David Hildenbrand <david@redhat.com>
Mel Gorman Jan. 9, 2023, 1:31 p.m. UTC | #3
On Wed, Jan 04, 2023 at 09:18:05PM +0200, Mike Rapoport wrote:
> From: "Mike Rapoport (IBM)" <rppt@kernel.org>
> 
> Rename early_page_uninitialised() to early_page_initialised() and invert
> its logic to make the code more readable.
> 
> Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>

Acked-by: Mel Gorman <mgorman@suse.de>

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0745aedebb37..a881f2d42b2c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -443,15 +443,15 @@ static inline bool deferred_pages_enabled(void)
 	return static_branch_unlikely(&deferred_pages);
 }
 
-/* Returns true if the struct page for the pfn is uninitialised */
-static inline bool __meminit early_page_uninitialised(unsigned long pfn)
+/* Returns true if the struct page for the pfn is initialised */
+static inline bool __meminit early_page_initialised(unsigned long pfn)
 {
 	int nid = early_pfn_to_nid(pfn);
 
 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
-		return true;
+		return false;
 
-	return false;
+	return true;
 }
 
 /*
@@ -498,9 +498,9 @@ static inline bool deferred_pages_enabled(void)
 	return false;
 }
 
-static inline bool early_page_uninitialised(unsigned long pfn)
+static inline bool early_page_initialised(unsigned long pfn)
 {
-	return false;
+	return true;
 }
 
 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
@@ -1641,7 +1641,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
 	pg_data_t *pgdat;
 	int nid, zid;
 
-	if (!early_page_uninitialised(pfn))
+	if (early_page_initialised(pfn))
 		return;
 
 	nid = early_pfn_to_nid(pfn);
@@ -1804,7 +1804,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 void __init memblock_free_pages(struct page *page, unsigned long pfn,
 							unsigned int order)
 {
-	if (early_page_uninitialised(pfn))
+	if (!early_page_initialised(pfn))
 		return;
 	if (!kmsan_memblock_free_pages(page, order)) {
 		/* KMSAN will take care of these pages. */
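
A note on the semantics the predicate encodes: with
CONFIG_DEFERRED_STRUCT_PAGE_INIT enabled, struct pages at or beyond a node's
first_deferred_pfn are populated later in boot, so the renamed helper returns
false for them; the stub in the second hunk is compiled when deferred
initialisation is not configured, where every early page is already
initialised, hence the unconditional "return true". Below is a minimal
userspace model of the inverted check, for illustration only; struct
node_data and the sample pfn values are stand-ins for the kernel's pg_data_t,
node_online() and NODE_DATA(nid)->first_deferred_pfn, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative stand-in for pg_data_t / NODE_DATA(nid) */
    struct node_data {
            bool online;
            unsigned long first_deferred_pfn;
    };

    /* mirrors the inverted predicate from the patch */
    static bool early_page_initialised(const struct node_data *node,
                                       unsigned long pfn)
    {
            /* init of this struct page was deferred past early boot */
            if (node->online && pfn >= node->first_deferred_pfn)
                    return false;

            return true;
    }

    int main(void)
    {
            struct node_data node = {
                    .online = true,
                    .first_deferred_pfn = 0x1000, /* sample boundary */
            };

            /* below the boundary: already initialised, prints 1 */
            printf("pfn 0x0800: %d\n", early_page_initialised(&node, 0x0800));
            /* at or above the boundary: deferred, prints 0 */
            printf("pfn 0x2000: %d\n", early_page_initialised(&node, 0x2000));
            return 0;
    }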