diff mbox series

[RFC,v1,1/4] mm, hwpoison, hugetlb: introduce SUBPAGE_INDEX_HWPOISON to save raw error page

Message ID 20220427042841.678351-2-naoya.horiguchi@linux.dev (mailing list archive)
State New
Headers show
Series mm, hwpoison: improve handling workload related to hugetlb and memory_hotplug | expand

Commit Message

Naoya Horiguchi April 27, 2022, 4:28 a.m. UTC
From: Naoya Horiguchi <naoya.horiguchi@nec.com>

When handling memory error on a hugetlb page, the error handler tries to
dissolve and turn it into 4kB pages.  If it's successfully dissolved,
PageHWPoison flag is moved to the raw error page, so but that's all
right.  However, dissolve sometimes fails, then the error page is left
as hwpoisoned hugepage. It's useful if we can retry to dissolve it to
save healthy pages, but that's not possible now because the information
about where the raw error page is lost.

Use the private field of a tail page to keep that information.  The code
path of shrinking hugepage pool used this info to try delayed dissolve.

Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
---
 include/linux/hugetlb.h | 24 ++++++++++++++++++++++++
 mm/hugetlb.c            |  9 +++++++++
 mm/memory-failure.c     |  2 ++
 3 files changed, 35 insertions(+)

Comments

Miaohe Lin April 27, 2022, 7:11 a.m. UTC | #1
On 2022/4/27 12:28, Naoya Horiguchi wrote:
> From: Naoya Horiguchi <naoya.horiguchi@nec.com>
> 
> When handling memory error on a hugetlb page, the error handler tries to
> dissolve and turn it into 4kB pages.  If it's successfully dissolved,
> PageHWPoison flag is moved to the raw error page, so but that's all

s/so but/so/

> right.  However, dissolve sometimes fails, then the error page is left
> as hwpoisoned hugepage. It's useful if we can retry to dissolve it to
> save healthy pages, but that's not possible now because the information
> about where the raw error page is lost.
> 
> Use the private field of a tail page to keep that information.  The code

Only one raw error page is saved now. Should this be ok? I think so as memory
failure should be rare anyway?

> path of shrinking hugepage pool used this info to try delayed dissolve.
> 
> Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
> ---
>  include/linux/hugetlb.h | 24 ++++++++++++++++++++++++
>  mm/hugetlb.c            |  9 +++++++++
>  mm/memory-failure.c     |  2 ++
>  3 files changed, 35 insertions(+)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index ac2a1d758a80..689e69cb556b 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -42,6 +42,9 @@ enum {
>  	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
>  	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
>  	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
> +#endif
> +#ifdef CONFIG_CGROUP_HUGETLB
> +	SUBPAGE_INDEX_HWPOISON,
>  #endif

Do we rely on the CONFIG_CGROUP_HUGETLB to store the raw error page?

>  	__NR_USED_SUBPAGE,
>  };
> @@ -784,6 +787,27 @@ extern int dissolve_free_huge_page(struct page *page);
>  extern int dissolve_free_huge_pages(unsigned long start_pfn,
>  				    unsigned long end_pfn);
>  
> +#ifdef CONFIG_MEMORY_FAILURE
> +/*
> + * pointer to raw error page is located in hpage[SUBPAGE_INDEX_HWPOISON].private
> + */
> +static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
> +{
> +	return (void *)page_private(hpage + SUBPAGE_INDEX_HWPOISON);
> +}
> +
> +static inline void hugetlb_set_page_hwpoison(struct page *hpage,
> +					struct page *page)
> +{
> +	set_page_private(hpage + SUBPAGE_INDEX_HWPOISON, (unsigned long)page);
> +}
> +#else
> +static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
> +{
> +	return NULL;
> +}
> +#endif
> +
>  #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
>  #ifndef arch_hugetlb_migration_supported
>  static inline bool arch_hugetlb_migration_supported(struct hstate *h)
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index f8e048b939c7..6867ea8345d1 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1547,6 +1547,15 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
>  		return;
>  	}
>  
> +	if (unlikely(PageHWPoison(page))) {
> +		struct page *raw_error = hugetlb_page_hwpoison(page);
> +
> +		if (raw_error && raw_error != page) {
> +			SetPageHWPoison(raw_error);
> +			ClearPageHWPoison(page);
> +		}
> +	}
> +
>  	for (i = 0; i < pages_per_huge_page(h);
>  	     i++, subpage = mem_map_next(subpage, page, i)) {
>  		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 3e36fc19c4d1..73948a00ad4a 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1535,6 +1535,8 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
>  		goto out;
>  	}
>  
> +	hugetlb_set_page_hwpoison(head, page);
> +
>  	return ret;
>  out:
>  	if (count_increased)
> 

This patch looks good to me. I will learn this series more when I get more time.
Many thanks for your hard work! :)
HORIGUCHI NAOYA(堀口 直也) April 27, 2022, 1:03 p.m. UTC | #2
On Wed, Apr 27, 2022 at 03:11:31PM +0800, Miaohe Lin wrote:
> On 2022/4/27 12:28, Naoya Horiguchi wrote:
> > From: Naoya Horiguchi <naoya.horiguchi@nec.com>
> > 
> > When handling memory error on a hugetlb page, the error handler tries to
> > dissolve and turn it into 4kB pages.  If it's successfully dissolved,
> > PageHWPoison flag is moved to the raw error page, so but that's all
> 
> s/so but/so/

Fixed, thank you.

> 
> > right.  However, dissolve sometimes fails, then the error page is left
> > as hwpoisoned hugepage. It's useful if we can retry to dissolve it to
> > save healthy pages, but that's not possible now because the information
> > about where the raw error page is lost.
> > 
> > Use the private field of a tail page to keep that information.  The code
> 
> Only one raw error page is saved now. Should this be ok? I think so as memory
> failure should be rare anyway?

This is a good point.  It might be rare, but maybe we need some consideration
on it. Some ideas in my mind below ...

- using struct page of all subpages is not compatible with hugetlb_free_vmemmap,
  so it's not desirable.
- defining a linked list starting from hpage[SUBPAGE_INDEX_HWPOISON].private
  might be a solution to save the multiple offsets.
- hacking bits in hpage[SUBPAGE_INDEX_HWPOISON].private field to save offset
  info in compressed format.  For example, for 2MB hugepage there could be
  512 offset numbers, so we can save one offset with 9 bits subfield.
  So we can save upto 7 offsets in the field.  This is not flexible and
  still can't handle many errors.
- maintaining global data structure to save the pfn of all hwpoison pages
  in the system. This might sound overkilling for the current purpose,
  but this data structure might be helpful for other purpose, so in the long
  run someone might get interested in it.

> 
> > path of shrinking hugepage pool used this info to try delayed dissolve.
> > 
> > Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
> > ---
> >  include/linux/hugetlb.h | 24 ++++++++++++++++++++++++
> >  mm/hugetlb.c            |  9 +++++++++
> >  mm/memory-failure.c     |  2 ++
> >  3 files changed, 35 insertions(+)
> > 
> > diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> > index ac2a1d758a80..689e69cb556b 100644
> > --- a/include/linux/hugetlb.h
> > +++ b/include/linux/hugetlb.h
> > @@ -42,6 +42,9 @@ enum {
> >  	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
> >  	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
> >  	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
> > +#endif
> > +#ifdef CONFIG_CGROUP_HUGETLB
> > +	SUBPAGE_INDEX_HWPOISON,
> >  #endif
> 
> Do we rely on the CONFIG_CGROUP_HUGETLB to store the raw error page?

No. I meant CONFIG_MEMORY_FAILURE.
# I just copied and pasted the #ifdef line just above, and forget to update
# the CONFIG_* part :(

Thanks,
Naoya Horiguchi
Miaohe Lin April 28, 2022, 3:14 a.m. UTC | #3
On 2022/4/27 21:03, HORIGUCHI NAOYA(堀口 直也) wrote:
> On Wed, Apr 27, 2022 at 03:11:31PM +0800, Miaohe Lin wrote:
>> On 2022/4/27 12:28, Naoya Horiguchi wrote:
>>> From: Naoya Horiguchi <naoya.horiguchi@nec.com>
>>>
>>> When handling memory error on a hugetlb page, the error handler tries to
>>> dissolve and turn it into 4kB pages.  If it's successfully dissolved,
>>> PageHWPoison flag is moved to the raw error page, so but that's all
>>
>> s/so but/so/
> 
> Fixed, thank you.
> 
>>
>>> right.  However, dissolve sometimes fails, then the error page is left
>>> as hwpoisoned hugepage. It's useful if we can retry to dissolve it to
>>> save healthy pages, but that's not possible now because the information
>>> about where the raw error page is lost.
>>>
>>> Use the private field of a tail page to keep that information.  The code
>>
>> Only one raw error page is saved now. Should this be ok? I think so as memory
>> failure should be rare anyway?
> 
> This is a good point.  It might be rare, but maybe we need some consideration
> on it. Some ideas in my mind below ...
> 
> - using struct page of all subpages is not compatible with hugetlb_free_vmemmap,
>   so it's not desirable.

Yes, that is not compatible with hugetlb_free_vmemmap.

> - defining a linked list starting from hpage[SUBPAGE_INDEX_HWPOISON].private
>   might be a solution to save the multiple offsets.
> - hacking bits in hpage[SUBPAGE_INDEX_HWPOISON].private field to save offset
>   info in compressed format.  For example, for 2MB hugepage there could be
>   512 offset numbers, so we can save one offset with 9 bits subfield.
>   So we can save upto 7 offsets in the field.  This is not flexible and
>   still can't handle many errors.

If there are many errors inside one hugetlb page, there might be other serious problems.

> - maintaining global data structure to save the pfn of all hwpoison pages
>   in the system. This might sound overkilling for the current purpose,
>   but this data structure might be helpful for other purpose, so in the long
>   run someone might get interested in it.

The above ideas sound good. And last one might be more helpful in the memory re-online
case: We can restore the hwpoison info using this global data structure. :)

Thanks!

> 
>>
>>> path of shrinking hugepage pool used this info to try delayed dissolve.
>>>
>>> Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
>>> ---
>>>  include/linux/hugetlb.h | 24 ++++++++++++++++++++++++
>>>  mm/hugetlb.c            |  9 +++++++++
>>>  mm/memory-failure.c     |  2 ++
>>>  3 files changed, 35 insertions(+)
>>>
>>> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
>>> index ac2a1d758a80..689e69cb556b 100644
>>> --- a/include/linux/hugetlb.h
>>> +++ b/include/linux/hugetlb.h
>>> @@ -42,6 +42,9 @@ enum {
>>>  	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
>>>  	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
>>>  	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
>>> +#endif
>>> +#ifdef CONFIG_CGROUP_HUGETLB
>>> +	SUBPAGE_INDEX_HWPOISON,
>>>  #endif
>>
>> Do we rely on the CONFIG_CGROUP_HUGETLB to store the raw error page?
> 
> No. I meant CONFIG_MEMORY_FAILURE.
> # I just copied and pasted the #ifdef line just above, and forget to update
> # the CONFIG_* part :(
> 
> Thanks,
> Naoya Horiguchi
>
Jane Chu May 12, 2022, 10:31 p.m. UTC | #4
On 4/26/2022 9:28 PM, Naoya Horiguchi wrote:
> From: Naoya Horiguchi <naoya.horiguchi@nec.com>
> 
> When handling memory error on a hugetlb page, the error handler tries to
> dissolve and turn it into 4kB pages.  If it's successfully dissolved,
> PageHWPoison flag is moved to the raw error page, so but that's all
> right.  However, dissolve sometimes fails, then the error page is left
> as hwpoisoned hugepage. It's useful if we can retry to dissolve it to
> save healthy pages, but that's not possible now because the information
> about where the raw error page is lost.
> 
> Use the private field of a tail page to keep that information.  The code
> path of shrinking hugepage pool used this info to try delayed dissolve.
> 
> Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
> ---
>   include/linux/hugetlb.h | 24 ++++++++++++++++++++++++
>   mm/hugetlb.c            |  9 +++++++++
>   mm/memory-failure.c     |  2 ++
>   3 files changed, 35 insertions(+)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index ac2a1d758a80..689e69cb556b 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -42,6 +42,9 @@ enum {
>   	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
>   	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
>   	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
> +#endif
> +#ifdef CONFIG_CGROUP_HUGETLB
> +	SUBPAGE_INDEX_HWPOISON,
>   #endif
>   	__NR_USED_SUBPAGE,
>   };
> @@ -784,6 +787,27 @@ extern int dissolve_free_huge_page(struct page *page);
>   extern int dissolve_free_huge_pages(unsigned long start_pfn,
>   				    unsigned long end_pfn);
>   
> +#ifdef CONFIG_MEMORY_FAILURE
> +/*
> + * pointer to raw error page is located in hpage[SUBPAGE_INDEX_HWPOISON].private
> + */
> +static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
> +{
> +	return (void *)page_private(hpage + SUBPAGE_INDEX_HWPOISON);
> +}
> +
> +static inline void hugetlb_set_page_hwpoison(struct page *hpage,
> +					struct page *page)
> +{
> +	set_page_private(hpage + SUBPAGE_INDEX_HWPOISON, (unsigned long)page);
> +}

What happens if the ->private field is already holding a poisoned page 
pointer?  that is, in a scenario of multiple poisoned pages within a 
hugepage, what to do?  mark the entire hpage poisoned?

thanks,
-jane


> +#else
> +static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
> +{
> +	return NULL;
> +}
> +#endif
> +
>   #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
>   #ifndef arch_hugetlb_migration_supported
>   static inline bool arch_hugetlb_migration_supported(struct hstate *h)
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index f8e048b939c7..6867ea8345d1 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1547,6 +1547,15 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
>   		return;
>   	}
>   
> +	if (unlikely(PageHWPoison(page))) {
> +		struct page *raw_error = hugetlb_page_hwpoison(page);
> +
> +		if (raw_error && raw_error != page) {
> +			SetPageHWPoison(raw_error);
> +			ClearPageHWPoison(page);
> +		}
> +	}
> +
>   	for (i = 0; i < pages_per_huge_page(h);
>   	     i++, subpage = mem_map_next(subpage, page, i)) {
>   		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 3e36fc19c4d1..73948a00ad4a 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1535,6 +1535,8 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
>   		goto out;
>   	}
>   
> +	hugetlb_set_page_hwpoison(head, page);
> +
>   	return ret;
>   out:
>   	if (count_increased)
HORIGUCHI NAOYA(堀口 直也) May 12, 2022, 10:49 p.m. UTC | #5
On Thu, May 12, 2022 at 10:31:42PM +0000, Jane Chu wrote:
> On 4/26/2022 9:28 PM, Naoya Horiguchi wrote:
> > From: Naoya Horiguchi <naoya.horiguchi@nec.com>
> > 
> > When handling memory error on a hugetlb page, the error handler tries to
> > dissolve and turn it into 4kB pages.  If it's successfully dissolved,
> > PageHWPoison flag is moved to the raw error page, so but that's all
> > right.  However, dissolve sometimes fails, then the error page is left
> > as hwpoisoned hugepage. It's useful if we can retry to dissolve it to
> > save healthy pages, but that's not possible now because the information
> > about where the raw error page is lost.
> > 
> > Use the private field of a tail page to keep that information.  The code
> > path of shrinking hugepage pool used this info to try delayed dissolve.
> > 
> > Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
> > ---
> >   include/linux/hugetlb.h | 24 ++++++++++++++++++++++++
> >   mm/hugetlb.c            |  9 +++++++++
> >   mm/memory-failure.c     |  2 ++
> >   3 files changed, 35 insertions(+)
> > 
> > diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> > index ac2a1d758a80..689e69cb556b 100644
> > --- a/include/linux/hugetlb.h
> > +++ b/include/linux/hugetlb.h
> > @@ -42,6 +42,9 @@ enum {
> >   	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
> >   	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
> >   	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
> > +#endif
> > +#ifdef CONFIG_CGROUP_HUGETLB
> > +	SUBPAGE_INDEX_HWPOISON,
> >   #endif
> >   	__NR_USED_SUBPAGE,
> >   };
> > @@ -784,6 +787,27 @@ extern int dissolve_free_huge_page(struct page *page);
> >   extern int dissolve_free_huge_pages(unsigned long start_pfn,
> >   				    unsigned long end_pfn);
> >   
> > +#ifdef CONFIG_MEMORY_FAILURE
> > +/*
> > + * pointer to raw error page is located in hpage[SUBPAGE_INDEX_HWPOISON].private
> > + */
> > +static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
> > +{
> > +	return (void *)page_private(hpage + SUBPAGE_INDEX_HWPOISON);
> > +}
> > +
> > +static inline void hugetlb_set_page_hwpoison(struct page *hpage,
> > +					struct page *page)
> > +{
> > +	set_page_private(hpage + SUBPAGE_INDEX_HWPOISON, (unsigned long)page);
> > +}
> 
> What happens if the ->private field is already holding a poisoned page 
> pointer?  that is, in a scenario of multiple poisoned pages within a 
> hugepage, what to do?  mark the entire hpage poisoned?

Hi Jane,

Current version does not consider multiple poisoned pages scenario,
so if that happens, ->private field would be simply overwritten.
But in this patch hugetlb_set_page_hwpoison() is called just after
"if (TestSetPageHWPoison(head))" check, so hugetlb_set_page_hwpoison()
is not expected to be called more than once on a single hugepage.

When we try to support multiple poison scenario, we may add some code
in "already hwpoisoned" path to store additional info about the raw
error page. The implementation detail is still to be determined.

Thanks,
Naoya Horiguchi
diff mbox series

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ac2a1d758a80..689e69cb556b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -42,6 +42,9 @@  enum {
 	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
 	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
 	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
+#endif
+#ifdef CONFIG_CGROUP_HUGETLB
+	SUBPAGE_INDEX_HWPOISON,
 #endif
 	__NR_USED_SUBPAGE,
 };
@@ -784,6 +787,27 @@  extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
 
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * pointer to raw error page is located in hpage[SUBPAGE_INDEX_HWPOISON].private
+ */
+static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
+{
+	return (void *)page_private(hpage + SUBPAGE_INDEX_HWPOISON);
+}
+
+static inline void hugetlb_set_page_hwpoison(struct page *hpage,
+					struct page *page)
+{
+	set_page_private(hpage + SUBPAGE_INDEX_HWPOISON, (unsigned long)page);
+}
+#else
+static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
+{
+	return NULL;
+}
+#endif
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 #ifndef arch_hugetlb_migration_supported
 static inline bool arch_hugetlb_migration_supported(struct hstate *h)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8e048b939c7..6867ea8345d1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1547,6 +1547,15 @@  static void __update_and_free_page(struct hstate *h, struct page *page)
 		return;
 	}
 
+	if (unlikely(PageHWPoison(page))) {
+		struct page *raw_error = hugetlb_page_hwpoison(page);
+
+		if (raw_error && raw_error != page) {
+			SetPageHWPoison(raw_error);
+			ClearPageHWPoison(page);
+		}
+	}
+
 	for (i = 0; i < pages_per_huge_page(h);
 	     i++, subpage = mem_map_next(subpage, page, i)) {
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3e36fc19c4d1..73948a00ad4a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1535,6 +1535,8 @@  int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
 		goto out;
 	}
 
+	hugetlb_set_page_hwpoison(head, page);
+
 	return ret;
 out:
 	if (count_increased)