
[v2,2/3] mm: don't hide potentially null memmap pointer in sparse_remove_one_section

Message ID: 20190626061124.16013-3-alastair@au1.ibm.com (mailing list archive)
State: New, archived
Series: mm: Cleanup & allow modules to hotplug memory

Commit Message

Alastair D'Silva June 26, 2019, 6:11 a.m. UTC
From: Alastair D'Silva <alastair@d-silva.org>

By adding the offset to memmap before passing it to clear_hwpoisoned_pages,
we hide a potentially null memmap from the null check inside
clear_hwpoisoned_pages.

This patch passes the offset to clear_hwpoisoned_pages instead, allowing
memmap to successfully perform its null check.

Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
---
 mm/sparse.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
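
For context, a minimal stand-alone sketch of the problem described above. The struct, the helper name check_memmap and the constants are simplified illustrations rather than the kernel code; the null check it models is the one at the top of clear_hwpoisoned_pages that the changelog refers to.

/*
 * Userspace-only illustration: once the caller adds map_offset to a NULL
 * memmap, the callee's null check can no longer fire. (Pointer arithmetic
 * on NULL is undefined behaviour in standard C; in practice it yields a
 * non-NULL value, which is exactly why the check is bypassed.)
 */
#include <stdio.h>

struct page { unsigned long flags; };

static void check_memmap(struct page *memmap, int nr_pages)
{
	if (!memmap) {		/* the check the patch wants to keep effective */
		printf("memmap is NULL, bailing out\n");
		return;
	}
	printf("would walk %d pages starting at %p\n", nr_pages, (void *)memmap);
}

int main(void)
{
	struct page *memmap = NULL;	/* e.g. a section with no memmap */
	unsigned long map_offset = 16;

	/* Old call style: NULL + 16 is not NULL, so the check is bypassed. */
	check_memmap(memmap + map_offset, 256 - map_offset);

	/* New call style (conceptually): hand over the raw pointer and the
	 * offset separately, so the callee can still bail out on NULL. */
	check_memmap(memmap, 256);

	return 0;
}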

Comments

Michal Hocko June 26, 2019, 6:23 a.m. UTC | #1
On Wed 26-06-19 16:11:22, Alastair D'Silva wrote:
> From: Alastair D'Silva <alastair@d-silva.org>
> 
> By adding the offset to memmap before passing it to clear_hwpoisoned_pages,
> we hide a potentially null memmap from the null check inside
> clear_hwpoisoned_pages.
> 
> This patch passes the offset to clear_hwpoisoned_pages instead, allowing
> memmap to successfully perform its null check.

Same issue with the changelog as the previous patch (missing WHY).

> 
> Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
> ---
>  mm/sparse.c | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 57a1a3d9c1cf..1ec32aef5590 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -753,7 +753,8 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
>  
>  #ifdef CONFIG_MEMORY_HOTREMOVE
>  #ifdef CONFIG_MEMORY_FAILURE
> -static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long start, unsigned long count)
>  {
>  	int i;
>  
> @@ -769,7 +770,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	if (atomic_long_read(&num_poisoned_pages) == 0)
>  		return;
>  
> -	for (i = 0; i < nr_pages; i++) {
> +	for (i = start; i < start + count; i++) {
>  		if (PageHWPoison(&memmap[i])) {
>  			atomic_long_sub(1, &num_poisoned_pages);
>  			ClearPageHWPoison(&memmap[i]);
> @@ -777,7 +778,8 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	}
>  }
>  #else
> -static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static inline void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long start, unsigned long count)
>  {
>  }
>  #endif
> @@ -824,7 +826,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
>  		ms->pageblock_flags = NULL;
>  	}
>  
> -	clear_hwpoisoned_pages(memmap + map_offset,
> +	clear_hwpoisoned_pages(memmap, map_offset,
>  			PAGES_PER_SECTION - map_offset);
>  	free_section_usemap(memmap, usemap, altmap);
>  }
> -- 
> 2.21.0
Alastair D'Silva June 26, 2019, 6:30 a.m. UTC | #2
On Wed, 2019-06-26 at 08:23 +0200, Michal Hocko wrote:
> On Wed 26-06-19 16:11:22, Alastair D'Silva wrote:
> > From: Alastair D'Silva <alastair@d-silva.org>
> > 
> > By adding the offset to memmap before passing it to
> > clear_hwpoisoned_pages,
> > we hide a potentially null memmap from the null check inside
> > clear_hwpoisoned_pages.
> > 
> > This patch passes the offset to clear_hwpoisoned_pages instead,
> > allowing
> > memmap to successfully perform its null check.
> 
> Same issue with the changelog as the previous patch (missing WHY).
> 

The first paragraph explains what the problem is with the existing code
(same applies to 1/3 too).
Michal Hocko June 26, 2019, 6:59 a.m. UTC | #3
On Wed 26-06-19 16:30:55, Alastair D'Silva wrote:
> On Wed, 2019-06-26 at 08:23 +0200, Michal Hocko wrote:
> > On Wed 26-06-19 16:11:22, Alastair D'Silva wrote:
> > > From: Alastair D'Silva <alastair@d-silva.org>
> > > 
> > > By adding the offset to memmap before passing it to
> > > clear_hwpoisoned_pages,
> > > we hide a potentially null memmap from the null check inside
> > > clear_hwpoisoned_pages.
> > > 
> > > This patch passes the offset to clear_hwpoisoned_pages instead,
> > > allowing
> > > memmap to successfully perform its null check.
> > 
> > Same issue with the changelog as the previous patch (missing WHY).
> > 
> 
> The first paragraph explains what the problem is with the existing code
> (same applies to 1/3 too).

Under what conditions does that happen? Is this a theoretical problem, or can
you hit it with (buggy) code? Please be much more specific.
David Hildenbrand June 28, 2019, 11:29 a.m. UTC | #4
On 26.06.19 08:11, Alastair D'Silva wrote:
> From: Alastair D'Silva <alastair@d-silva.org>
> 
> By adding the offset to memmap before passing it to clear_hwpoisoned_pages,
> we hide a potentially null memmap from the null check inside
> clear_hwpoisoned_pages.
> 
> This patch passes the offset to clear_hwpoisoned_pages instead, allowing
> memmap to successfully perform its null check.
> 
> Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
> ---
>  mm/sparse.c | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 57a1a3d9c1cf..1ec32aef5590 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -753,7 +753,8 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
>  
>  #ifdef CONFIG_MEMORY_HOTREMOVE
>  #ifdef CONFIG_MEMORY_FAILURE
> -static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long start, unsigned long count)
>  {
>  	int i;
>  
> @@ -769,7 +770,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	if (atomic_long_read(&num_poisoned_pages) == 0)
>  		return;
>  
> -	for (i = 0; i < nr_pages; i++) {
> +	for (i = start; i < start + count; i++) {

start and count are unsigned longs, i is an int.

Besides that

Acked-by: David Hildenbrand <david@redhat.com>

>  		if (PageHWPoison(&memmap[i])) {
>  			atomic_long_sub(1, &num_poisoned_pages);
>  			ClearPageHWPoison(&memmap[i]);
> @@ -777,7 +778,8 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
>  	}
>  }
>  #else
> -static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
> +static inline void clear_hwpoisoned_pages(struct page *memmap,
> +		unsigned long start, unsigned long count)
>  {
>  }
>  #endif
> @@ -824,7 +826,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
>  		ms->pageblock_flags = NULL;
>  	}
>  
> -	clear_hwpoisoned_pages(memmap + map_offset,
> +	clear_hwpoisoned_pages(memmap, map_offset,
>  			PAGES_PER_SECTION - map_offset);
>  	free_section_usemap(memmap, usemap, altmap);
>  }
>
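
To illustrate David's point about the loop counter: start and count are unsigned long, while the existing i is an int, so i could truncate or overflow for a sufficiently large start + count. Below is a sketch of the function with the counter widened, reconstructed from the hunks above plus the null check the changelog refers to; it is an illustration, not a posted follow-up patch.

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap,
		unsigned long start, unsigned long count)
{
	unsigned long i;	/* widened from int to match start/count */

	/* The null check the changelog wants to keep effective. */
	if (!memmap)
		return;

	/* Cheap global check so the common no-poison case stays fast. */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = start; i < start + count; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#endif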

Patch

diff --git a/mm/sparse.c b/mm/sparse.c
index 57a1a3d9c1cf..1ec32aef5590 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -753,7 +753,8 @@  int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 #ifdef CONFIG_MEMORY_FAILURE
-static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+static void clear_hwpoisoned_pages(struct page *memmap,
+		unsigned long start, unsigned long count)
 {
 	int i;
 
@@ -769,7 +770,7 @@  static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 	if (atomic_long_read(&num_poisoned_pages) == 0)
 		return;
 
-	for (i = 0; i < nr_pages; i++) {
+	for (i = start; i < start + count; i++) {
 		if (PageHWPoison(&memmap[i])) {
 			atomic_long_sub(1, &num_poisoned_pages);
 			ClearPageHWPoison(&memmap[i]);
@@ -777,7 +778,8 @@  static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 	}
 }
 #else
-static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+static inline void clear_hwpoisoned_pages(struct page *memmap,
+		unsigned long start, unsigned long count)
 {
 }
 #endif
@@ -824,7 +826,7 @@  void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		ms->pageblock_flags = NULL;
 	}
 
-	clear_hwpoisoned_pages(memmap + map_offset,
+	clear_hwpoisoned_pages(memmap, map_offset,
 			PAGES_PER_SECTION - map_offset);
 	free_section_usemap(memmap, usemap, altmap);
 }