
[v8,07/12] mm/hugetlb: Set the PageHWPoison to the raw error page

Message ID 20201210035526.38938-8-songmuchun@bytedance.com
State New, archived
Series Free some vmemmap pages of HugeTLB page

Commit Message

Muchun Song Dec. 10, 2020, 3:55 a.m. UTC
Because we reuse the first tail vmemmap page frame and remap it
read-only, we cannot set PageHWPoison on a tail page.
So we can use the head[4].mapping to record the real error page
index and set the raw error page PageHWPoison later.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/hugetlb.c | 52 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 44 insertions(+), 8 deletions(-)
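
In short: dissolving records the error page's offset from the head page
in head[4].private, and freeing replays it once the vmemmap is writable
again. A minimal sketch of that record/replay logic (condensed from the
patch below; the helper names here are illustrative stand-ins, and the
free_vmemmap_pages_per_hpage() checks are omitted; the real helpers are
set_subpage_hwpoison() and subpage_hwpoison_deliver()):

  /*
   * Record phase (at dissolve time): the vmemmap backing the tail
   * pages is remapped read-only, so instead of setting PageHWPoison
   * on the tail page directly, stash the error page's offset from
   * the head page in head[4].private.
   */
  static void record_error_offset(struct page *head, struct page *page)
  {
  	if (PageHWPoison(head))
  		set_page_private(head + 4, page - head);
  }

  /*
   * Replay phase (at free time): once alloc_huge_page_vmemmap() has
   * restored writable vmemmap pages, read the offset back and move
   * the PageHWPoison flag from the head page to the raw error page.
   */
  static void deliver_error_flag(struct page *head)
  {
  	struct page *page;

  	if (!PageHWPoison(head))
  		return;

  	page = head + page_private(head + 4);
  	if (page != head) {
  		SetPageHWPoison(page);
  		ClearPageHWPoison(head);
  	}
  }

For example, if the hardware error hits head + 3, dissolving records 3
in head[4].private; when the huge page is later freed, the flag moves
from head to head + 3, leaving every other subpage reusable.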

Comments

Muchun Song Dec. 10, 2020, 11:11 a.m. UTC | #1
On Thu, Dec 10, 2020 at 11:58 AM Muchun Song <songmuchun@bytedance.com> wrote:
>
> Because we reuse the first tail vmemmap page frame and remap it
> read-only, we cannot set PageHWPoison on a tail page.
> So we can use the head[4].mapping to record the real error page
                              ^^^
                             private

A typo. Will fix it in the next version. Thanks.

> index and set the raw error page PageHWPoison later.
>
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> ---
>  mm/hugetlb.c | 52 ++++++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 44 insertions(+), 8 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 542e6cb81321..06157df08d8e 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1347,6 +1347,47 @@ static inline void __update_and_free_page(struct hstate *h, struct page *page)
>                 schedule_work(&hpage_update_work);
>  }
>
> +static inline void subpage_hwpoison_deliver(struct hstate *h, struct page *head)
> +{
> +       struct page *page = head;
> +
> +       if (!free_vmemmap_pages_per_hpage(h))
> +               return;
> +
> +       if (PageHWPoison(head))
> +               page = head + page_private(head + 4);
> +
> +       /*
> +        * Move PageHWPoison flag from head page to the raw error page,
> +        * which makes any subpages rather than the error page reusable.
> +        */
> +       if (page != head) {
> +               SetPageHWPoison(page);
> +               ClearPageHWPoison(head);
> +       }
> +}
> +
> +static inline void set_subpage_hwpoison(struct hstate *h, struct page *head,
> +                                       struct page *page)
> +{
> +       if (!PageHWPoison(head))
> +               return;
> +
> +       if (free_vmemmap_pages_per_hpage(h)) {
> +               set_page_private(head + 4, page - head);
> +               return;
> +       }
> +
> +       /*
> +        * Move PageHWPoison flag from head page to the raw error page,
> +        * which makes any subpages rather than the error page reusable.
> +        */
> +       if (page != head) {
> +               SetPageHWPoison(page);
> +               ClearPageHWPoison(head);
> +       }
> +}
> +
>  static void update_and_free_page(struct hstate *h, struct page *page)
>  {
>         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
> @@ -1363,6 +1404,7 @@ static void __free_hugepage(struct hstate *h, struct page *page)
>         int i;
>
>         alloc_huge_page_vmemmap(h, page);
> +       subpage_hwpoison_deliver(h, page);
>
>         for (i = 0; i < pages_per_huge_page(h); i++) {
>                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
> @@ -1840,14 +1882,8 @@ int dissolve_free_huge_page(struct page *page)
>                 int nid = page_to_nid(head);
>                 if (h->free_huge_pages - h->resv_huge_pages == 0)
>                         goto out;
> -               /*
> -                * Move PageHWPoison flag from head page to the raw error page,
> -                * which makes any subpages rather than the error page reusable.
> -                */
> -               if (PageHWPoison(head) && page != head) {
> -                       SetPageHWPoison(page);
> -                       ClearPageHWPoison(head);
> -               }
> +
> +               set_subpage_hwpoison(h, head, page);
>                 list_del(&head->lru);
>                 h->free_huge_pages--;
>                 h->free_huge_pages_node[nid]--;
> --
> 2.11.0
>
Oscar Salvador Dec. 11, 2020, 1:36 p.m. UTC | #2
On Thu, Dec 10, 2020 at 11:55:21AM +0800, Muchun Song wrote:
> +static inline void subpage_hwpoison_deliver(struct hstate *h, struct page *head)
> +{
> +	struct page *page = head;
> +
> +	if (!free_vmemmap_pages_per_hpage(h))
> +		return;
> +
> +	if (PageHWPoison(head))
> +		page = head + page_private(head + 4);
> +
> +	/*
> +	 * Move PageHWPoison flag from head page to the raw error page,
> +	 * which makes any subpages rather than the error page reusable.
> +	 */
> +	if (page != head) {
> +		SetPageHWPoison(page);
> +		ClearPageHWPoison(head);
> +	}
> +}

I would make the names coherent.
I am definitely not good at names, but something like:
hwpoison_subpage_{foo,bar} looks better.

Also, could not subpage_hwpoison_deliver be rewritten like:

  static inline void subpage_hwpoison_deliver(struct hstate *h, struct page *head)
  {
       struct page *page;
  
       if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
               return;
  
       page = head + page_private(head + 4);
       /*
        * Move PageHWPoison flag from head page to the raw error page,
        * which makes any subpages rather than the error page reusable.
        */
       if (page != head) {
               SetPageHWPoison(page);
               ClearPageHWPoison(head);
       }
  }

I think it is better code-wise.

> +	 * Move PageHWPoison flag from head page to the raw error page,
> +	 * which makes any subpages rather than the error page reusable.
> +	 */
> +	if (page != head) {
> +		SetPageHWPoison(page);
> +		ClearPageHWPoison(head);
> +	}

I would put this in an else-if above:

	if (free_vmemmap_pages_per_hpage(h)) {
		set_page_private(head + 4, page - head);
	        return;
	} else if (page != head) {
		SetPageHWPoison(page);
		ClearPageHWPoison(head);
	}

or will we lose the optimization in case free_vmemmap_pages_per_hpage gets compiled out?
Muchun Song Dec. 11, 2020, 2:08 p.m. UTC | #3
On Fri, Dec 11, 2020 at 9:36 PM Oscar Salvador <osalvador@suse.de> wrote:
>
> On Thu, Dec 10, 2020 at 11:55:21AM +0800, Muchun Song wrote:
> > +static inline void subpage_hwpoison_deliver(struct hstate *h, struct page *head)
> > +{
> > +     struct page *page = head;
> > +
> > +     if (!free_vmemmap_pages_per_hpage(h))
> > +             return;
> > +
> > +     if (PageHWPoison(head))
> > +             page = head + page_private(head + 4);
> > +
> > +     /*
> > +      * Move PageHWPoison flag from head page to the raw error page,
> > +      * which makes any subpages rather than the error page reusable.
> > +      */
> > +     if (page != head) {
> > +             SetPageHWPoison(page);
> > +             ClearPageHWPoison(head);
> > +     }
> > +}
>
> I would make the names coherent.
> I am definitely not good at names, but something like:
> hwpoison_subpage_{foo,bar} looks better.

It's better than mine. Thank you.

>
> Also, could not subpage_hwpoison_deliver be rewritten like:
>
>   static inline void subpage_hwpoison_deliver(struct hstate *h, struct page *head)
>   {
>        struct page *page;
>
>        if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
>                return;
>
>        page = head + page_private(head + 4);
>        /*
>         * Move PageHWPoison flag from head page to the raw error page,
>         * which makes any subpages rather than the error page reusable.
>         */
>        if (page != head) {
>                SetPageHWPoison(page);
>                ClearPageHWPoison(head);
>        }
>   }
>
> I think it is better code-wise.

Will do. Thank you.

>
> > +      * Move PageHWPoison flag from head page to the raw error page,
> > +      * which makes any subpages rather than the error page reusable.
> > +      */
> > +     if (page != head) {
> > +             SetPageHWPoison(page);
> > +             ClearPageHWPoison(head);
> > +     }
>
> I would put this in an else-if above:
>
>         if (free_vmemmap_pages_per_hpage(h)) {
>                 set_page_private(head + 4, page - head);
>                 return;
>         } else if (page != head) {
>                 SetPageHWPoison(page);
>                 ClearPageHWPoison(head);
>         }
>
> or will we lose the optimization in case free_vmemmap_pages_per_hpage gets compiled out?
>

Either is OK. The compiler will optimize the code away for us when
free_vmemmap_pages_per_hpage() always returns false.
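
(A sketch of what that looks like, assuming the usual kernel pattern
where the disabled configuration provides a constant-returning static
inline stub; the exact stub in this series may differ:

  static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
  {
  	/* Feature compiled out: a compile-time constant. */
  	return 0;
  }

With that stub, "if (free_vmemmap_pages_per_hpage(h))" becomes
"if (0)", so the compiler drops the branch either way, whether it is
written with an early return or as an if/else-if chain.)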

Thanks for your suggestions. :-)

>
> --
> Oscar Salvador
> SUSE L3

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 542e6cb81321..06157df08d8e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1347,6 +1347,47 @@ static inline void __update_and_free_page(struct hstate *h, struct page *page)
 		schedule_work(&hpage_update_work);
 }
 
+static inline void subpage_hwpoison_deliver(struct hstate *h, struct page *head)
+{
+	struct page *page = head;
+
+	if (!free_vmemmap_pages_per_hpage(h))
+		return;
+
+	if (PageHWPoison(head))
+		page = head + page_private(head + 4);
+
+	/*
+	 * Move PageHWPoison flag from head page to the raw error page,
+	 * which makes any subpages rather than the error page reusable.
+	 */
+	if (page != head) {
+		SetPageHWPoison(page);
+		ClearPageHWPoison(head);
+	}
+}
+
+static inline void set_subpage_hwpoison(struct hstate *h, struct page *head,
+					struct page *page)
+{
+	if (!PageHWPoison(head))
+		return;
+
+	if (free_vmemmap_pages_per_hpage(h)) {
+		set_page_private(head + 4, page - head);
+		return;
+	}
+
+	/*
+	 * Move PageHWPoison flag from head page to the raw error page,
+	 * which makes any subpages rather than the error page reusable.
+	 */
+	if (page != head) {
+		SetPageHWPoison(page);
+		ClearPageHWPoison(head);
+	}
+}
+
 static void update_and_free_page(struct hstate *h, struct page *page)
 {
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1363,6 +1404,7 @@ static void __free_hugepage(struct hstate *h, struct page *page)
 	int i;
 
 	alloc_huge_page_vmemmap(h, page);
+	subpage_hwpoison_deliver(h, page);
 
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
@@ -1840,14 +1882,8 @@ int dissolve_free_huge_page(struct page *page)
 		int nid = page_to_nid(head);
 		if (h->free_huge_pages - h->resv_huge_pages == 0)
 			goto out;
-		/*
-		 * Move PageHWPoison flag from head page to the raw error page,
-		 * which makes any subpages rather than the error page reusable.
-		 */
-		if (PageHWPoison(head) && page != head) {
-			SetPageHWPoison(page);
-			ClearPageHWPoison(head);
-		}
+
+		set_subpage_hwpoison(h, head, page);
 		list_del(&head->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;