
[2/5] mm: don't hide potentially null memmap pointer in sparse_remove_one_section

Message ID 20190617043635.13201-3-alastair@au1.ibm.com (mailing list archive)
State New, archived
Series mm: Cleanup & allow modules to hotplug memory

Commit Message

Alastair D'Silva June 17, 2019, 4:36 a.m. UTC
From: Alastair D'Silva <alastair@d-silva.org>

By adding the offset to memmap before passing it to clear_hwpoisoned_pages,
the caller hides a potentially NULL memmap from the NULL check inside
clear_hwpoisoned_pages.

This patch passes the offset to clear_hwpoisoned_pages instead, allowing
the NULL check on memmap to be performed correctly.

Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
---
 mm/sparse.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
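
To illustrate the problem being fixed, here is a standalone userspace sketch
(not kernel code; struct page is reduced to a stub, and the clear_pages()
name, page count and offset are made up for illustration) of how offsetting a
possibly-NULL pointer at the call site defeats the callee's NULL check:

#include <stdio.h>

struct page { unsigned long flags; };	/* stub for the kernel's struct page */

/* Mirrors the NULL guard at the top of clear_hwpoisoned_pages(). */
static void clear_pages(struct page *memmap, int nr_pages)
{
	if (!memmap) {
		printf("memmap is NULL, nothing to do\n");
		return;
	}
	printf("would clear %d pages at %p\n", nr_pages, (void *)memmap);
}

int main(void)
{
	struct page *memmap = NULL;	/* e.g. a section with no backing memmap */
	unsigned long map_offset = 16;

	/*
	 * Pre-patch call style: memmap + map_offset is non-NULL even though
	 * memmap is NULL (pointer arithmetic on NULL is also undefined
	 * behaviour in C), so the callee's NULL check never fires.
	 */
	clear_pages(memmap + map_offset, 64 - (int)map_offset);

	/*
	 * Post-patch call style: memmap is passed unchanged and the offset
	 * travels as a separate argument, so the NULL check still works.
	 */
	clear_pages(memmap, 64);

	return 0;
}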

Comments

Mike Rapoport June 17, 2019, 6:49 a.m. UTC | #1
On Mon, Jun 17, 2019 at 02:36:28PM +1000, Alastair D'Silva wrote:
> From: Alastair D'Silva <alastair@d-silva.org>
> 
> By adding the offset to memmap before passing it to clear_hwpoisoned_pages,
> the caller hides a potentially NULL memmap from the NULL check inside
> clear_hwpoisoned_pages.
> 
> This patch passes the offset to clear_hwpoisoned_pages instead, allowing
> the NULL check on memmap to be performed correctly.
> 
> Signed-off-by: Alastair D'Silva <alastair@d-silva.org>

One nit below, otherwise

Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>

> ---
>  mm/sparse.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 104a79fedd00..66a99da9b11b 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -746,12 +746,14 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
>  		kfree(usemap);
>  		__kfree_section_memmap(memmap, altmap);
>  	}
> +

The whitespace change here is not related

>  	return ret;
>  }
> 
>  #ifdef CONFIG_MEMORY_HOTREMOVE
>  #ifdef CONFIG_MEMORY_FAILURE
> -static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long map_offset, int nr_pages)
>  {
>  	int i;
> 
> @@ -767,7 +769,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	if (atomic_long_read(&num_poisoned_pages) == 0)
>  		return;
> 
> -	for (i = 0; i < nr_pages; i++) {
> +	for (i = map_offset; i < nr_pages; i++) {
>  		if (PageHWPoison(&memmap[i])) {
>  			atomic_long_sub(1, &num_poisoned_pages);
>  			ClearPageHWPoison(&memmap[i]);
> @@ -775,7 +777,8 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	}
>  }
>  #else
> -static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static inline void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long map_offset, int nr_pages)
>  {
>  }
>  #endif
> @@ -822,8 +825,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
>  		ms->pageblock_flags = NULL;
>  	}
> 
> -	clear_hwpoisoned_pages(memmap + map_offset,
> -			PAGES_PER_SECTION - map_offset);
> +	clear_hwpoisoned_pages(memmap, map_offset, PAGES_PER_SECTION);
>  	free_section_usemap(memmap, usemap, altmap);
>  }
>  #endif /* CONFIG_MEMORY_HOTREMOVE */
> -- 
> 2.21.0
>
David Hildenbrand June 17, 2019, 7:26 a.m. UTC | #2
On 17.06.19 06:36, Alastair D'Silva wrote:
> From: Alastair D'Silva <alastair@d-silva.org>
> 
> By adding the offset to memmap before passing it to clear_hwpoisoned_pages,
> the caller hides a potentially NULL memmap from the NULL check inside
> clear_hwpoisoned_pages.
> 
> This patch passes the offset to clear_hwpoisoned_pages instead, allowing
> the NULL check on memmap to be performed correctly.
> 
> Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
> ---
>  mm/sparse.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 104a79fedd00..66a99da9b11b 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -746,12 +746,14 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
>  		kfree(usemap);
>  		__kfree_section_memmap(memmap, altmap);
>  	}
> +
>  	return ret;
>  }
>  
>  #ifdef CONFIG_MEMORY_HOTREMOVE
>  #ifdef CONFIG_MEMORY_FAILURE
> -static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long map_offset, int nr_pages)
>  {
>  	int i;
>  
> @@ -767,7 +769,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	if (atomic_long_read(&num_poisoned_pages) == 0)
>  		return;
>  
> -	for (i = 0; i < nr_pages; i++) {
> +	for (i = map_offset; i < nr_pages; i++) {
>  		if (PageHWPoison(&memmap[i])) {
>  			atomic_long_sub(1, &num_poisoned_pages);
>  			ClearPageHWPoison(&memmap[i]);
> @@ -775,7 +777,8 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	}
>  }
>  #else
> -static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static inline void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long map_offset, int nr_pages)

I somewhat dislike that map_offset modifies nr_pages internally.

I would prefer decoupling both and passing the actual number of pages to
clear instead:

clear_hwpoisoned_pages(memmap, map_offset,
		       PAGES_PER_SECTION - map_offset);


>  {
>  }
>  #endif
> @@ -822,8 +825,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
>  		ms->pageblock_flags = NULL;
>  	}
>  
> -	clear_hwpoisoned_pages(memmap + map_offset,
> -			PAGES_PER_SECTION - map_offset);
> +	clear_hwpoisoned_pages(memmap, map_offset, PAGES_PER_SECTION);
>  	free_section_usemap(memmap, usemap, altmap);
>  }
>  #endif /* CONFIG_MEMORY_HOTREMOVE */
>
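
For reference, a sketch of how clear_hwpoisoned_pages() could look with the
decoupling David suggests above, where nr_pages is the actual number of pages
to clear rather than an absolute end index. This is illustrative only, built
from the pieces already visible in this patch; it is not a hunk that was
posted:

static void clear_hwpoisoned_pages(struct page *memmap,
		unsigned long map_offset, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	/* nr_pages counts pages to clear, starting at map_offset. */
	for (i = 0; i < nr_pages; i++) {
		struct page *page = &memmap[map_offset + i];

		if (PageHWPoison(page)) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(page);
		}
	}
}

The caller in sparse_remove_one_section() would then pass the reduced count,
as in the suggestion:

	clear_hwpoisoned_pages(memmap, map_offset,
			       PAGES_PER_SECTION - map_offset);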

Patch

diff --git a/mm/sparse.c b/mm/sparse.c
index 104a79fedd00..66a99da9b11b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -746,12 +746,14 @@  int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
 		kfree(usemap);
 		__kfree_section_memmap(memmap, altmap);
 	}
+
 	return ret;
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 #ifdef CONFIG_MEMORY_FAILURE
-static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+static void clear_hwpoisoned_pages(struct page *memmap,
+		unsigned long map_offset, int nr_pages)
 {
 	int i;
 
@@ -767,7 +769,7 @@  static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 	if (atomic_long_read(&num_poisoned_pages) == 0)
 		return;
 
-	for (i = 0; i < nr_pages; i++) {
+	for (i = map_offset; i < nr_pages; i++) {
 		if (PageHWPoison(&memmap[i])) {
 			atomic_long_sub(1, &num_poisoned_pages);
 			ClearPageHWPoison(&memmap[i]);
@@ -775,7 +777,8 @@  static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 	}
 }
 #else
-static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+static inline void clear_hwpoisoned_pages(struct page *memmap,
+		unsigned long map_offset, int nr_pages)
 {
 }
 #endif
@@ -822,8 +825,7 @@  void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		ms->pageblock_flags = NULL;
 	}
 
-	clear_hwpoisoned_pages(memmap + map_offset,
-			PAGES_PER_SECTION - map_offset);
+	clear_hwpoisoned_pages(memmap, map_offset, PAGES_PER_SECTION);
 	free_section_usemap(memmap, usemap, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */