
[10/11] mm, memory_failure: teach memory_failure() about dev_pagemap pages

Message ID 152700002461.24093.13281217260996107277.stgit@dwillia2-desk3.amr.corp.intel.com (mailing list archive)
State New, archived

Commit Message

Dan Williams May 22, 2018, 2:40 p.m. UTC
    mce: Uncorrected hardware memory error in user-access at af34214200
    {1}[Hardware Error]: It has been corrected by h/w and requires no further action
    mce: [Hardware Error]: Machine check events logged
    {1}[Hardware Error]: event severity: corrected
    Memory failure: 0xaf34214: reserved kernel page still referenced by 1 users
    [..]
    Memory failure: 0xaf34214: recovery action for reserved kernel page: Failed
    mce: Memory error not recovered

In contrast to typical memory, dev_pagemap pages may be dax mapped. With
dax there is no possibility to map in another page dynamically since dax
establishes 1:1 physical address to file offset associations. Also
dev_pagemap pages associated with NVDIMM / persistent memory devices can
internally remap/repair addresses with poison. While memory_failure()
assumes that it can discard typical poisoned pages and keep them
unmapped indefinitely, dev_pagemap pages may be returned to service
after the error is cleared.

Teach memory_failure() to detect and handle MEMORY_DEVICE_HOST
dev_pagemap pages that have poison consumed by userspace. Mark the
memory as UC instead of unmapping it completely to allow ongoing access
via the device driver (nd_pmem). Later, nd_pmem will grow support for
marking the page back to WB when the error is cleared.

Cc: Jan Kara <jack@suse.cz>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 mm/memory-failure.c |  117 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 117 insertions(+)
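
For a rough sense of what "mark the memory as UC instead of unmapping it"
could look like, here is a minimal, hypothetical sketch (not part of the
patch): it marks one poisoned page uncacheable through its kernel direct-map
alias, and it assumes such an alias exists for the pfn; the mechanism nd_pmem
actually grows in later patches may differ.

#include <linux/mm.h>
#include <asm/set_memory.h>

/* Hypothetical helper, for illustration only. */
static int mark_poisoned_pfn_uc(unsigned long pfn)
{
	/* Resolve the direct-map (linear) address that aliases this pfn. */
	unsigned long addr = (unsigned long)page_address(pfn_to_page(pfn));

	/* Switch the mapping to uncacheable instead of unmapping it. */
	return set_memory_uc(addr, 1);
}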

Comments

Naoya Horiguchi May 23, 2018, 6:48 a.m. UTC | #1
On Tue, May 22, 2018 at 07:40:24AM -0700, Dan Williams wrote:
>     mce: Uncorrected hardware memory error in user-access at af34214200
>     {1}[Hardware Error]: It has been corrected by h/w and requires no further action
>     mce: [Hardware Error]: Machine check events logged
>     {1}[Hardware Error]: event severity: corrected
>     Memory failure: 0xaf34214: reserved kernel page still referenced by 1 users
>     [..]
>     Memory failure: 0xaf34214: recovery action for reserved kernel page: Failed
>     mce: Memory error not recovered
> 
> In contrast to typical memory, dev_pagemap pages may be dax mapped. With
> dax there is no possibility to map in another page dynamically since dax
> establishes 1:1 physical address to file offset associations. Also
> dev_pagemap pages associated with NVDIMM / persistent memory devices can
> internal remap/repair addresses with poison. While memory_failure()
> assumes that it can discard typical poisoned pages and keep them
> unmapped indefinitely, dev_pagemap pages may be returned to service
> after the error is cleared.
> 
> Teach memory_failure() to detect and handle MEMORY_DEVICE_HOST
> dev_pagemap pages that have poison consumed by userspace. Mark the
> memory as UC instead of unmapping it completely to allow ongoing access
> via the device driver (nd_pmem). Later, nd_pmem will grow support for
> marking the page back to WB when the error is cleared.
> 
> Cc: Jan Kara <jack@suse.cz>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Jérôme Glisse <jglisse@redhat.com>
> Cc: Matthew Wilcox <mawilcox@microsoft.com>
> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
> Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  mm/memory-failure.c |  117 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 117 insertions(+)
> 
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 42a193ee14d3..f95036f99a79 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -55,6 +55,7 @@
>  #include <linux/hugetlb.h>
>  #include <linux/memory_hotplug.h>
>  #include <linux/mm_inline.h>
> +#include <linux/memremap.h>
>  #include <linux/kfifo.h>
>  #include <linux/ratelimit.h>
>  #include "internal.h"
> @@ -1112,6 +1113,117 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
>  	return res;
>  }
>  
> +static unsigned long dax_mapping_size(struct page *page)
> +{
> +	struct address_space *mapping = page->mapping;
> +	pgoff_t pgoff = page_to_pgoff(page);
> +	struct vm_area_struct *vma;
> +	unsigned long size = 0;
> +
> +	i_mmap_lock_read(mapping);
> +	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
> +		unsigned long address = vma_address(page, vma);
> +		pgd_t *pgd;
> +		p4d_t *p4d;
> +		pud_t *pud;
> +		pmd_t *pmd;
> +		pte_t *pte;
> +
> +		pgd = pgd_offset(vma->vm_mm, address);
> +		if (!pgd_present(*pgd))
> +			continue;
> +		p4d = p4d_offset(pgd, address);
> +		if (!p4d_present(*p4d))
> +			continue;
> +		pud = pud_offset(p4d, address);
> +		if (!pud_present(*pud))
> +			continue;
> +		if (pud_devmap(*pud)) {
> +			size = PUD_SIZE;
> +			break;
> +		}
> +		pmd = pmd_offset(pud, address);
> +		if (!pmd_present(*pmd))
> +			continue;
> +		if (pmd_devmap(*pmd)) {
> +			size = PMD_SIZE;
> +			break;
> +		}
> +		pte = pte_offset_map(pmd, address);
> +		if (!pte_present(*pte))
> +			continue;
> +		if (pte_devmap(*pte)) {
> +			size = PAGE_SIZE;
> +			break;
> +		}
> +	}
> +	i_mmap_unlock_read(mapping);
> +	return size;
> +}
> +
> +static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
> +		struct dev_pagemap *pgmap)
> +{
> +	struct page *page = pfn_to_page(pfn);
> +	const bool unmap_success = true;
> +	unsigned long size;
> +	LIST_HEAD(tokill);
> +	int rc = -EBUSY;
> +	loff_t start;
> +
> +	lock_page(page);
> +	if (hwpoison_filter(page)) {
> +		rc = 0;
> +		goto out;
> +	}
> +
> +	switch (pgmap->type) {
> +	case MEMORY_DEVICE_PRIVATE:
> +	case MEMORY_DEVICE_PUBLIC:
> +		/*
> +		 * TODO: Handle HMM pages which may need coordination
> +		 * with device-side memory.
> +		 */
> +		goto out;
> +	default:
> +		if (!page->mapping)
> +			goto out;
> +		break;
> +	}
> +
> +	/*
> +	 * If the page is not mapped in userspace then report it as
> +	 * unhandled.
> +	 */
> +	size = dax_mapping_size(page);
> +	if (!size) {
> +		pr_err("Memory failure: %#lx: failed to unmap page\n", pfn);
> +		goto out;
> +	}
> +
> +	SetPageHWPoison(page);
> +
> +	/*
> +	 * Unlike System-RAM there is no possibility to swap in a
> +	 * different physical page at a given virtual address, so all
> +	 * userspace consumption of ZONE_DEVICE memory necessitates
> +	 * SIGBUS (i.e. MF_MUST_KILL)
> +	 */
> +	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
> +	collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
> +
> +	start = (page->index << PAGE_SHIFT) & ~(size - 1);
> +	unmap_mapping_range(page->mapping, start, start + size, 0);
> +
> +	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, ilog2(size),
> +			pfn, flags);
> +	rc = 0;
> +out:
> +	unlock_page(page);

I wrote as below in reply to 7/11

> > @@ -651,17 +653,20 @@ static int madvise_inject_error(int behavior,
> >  
> >               if (behavior == MADV_SOFT_OFFLINE) {
> >                       pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
> > -                                             page_to_pfn(page), start);
> > +                                     pfn, start);
> >  
> >                       ret = soft_offline_page(page, MF_COUNT_INCREASED);
> > +                     put_page(page);
> >                       if (ret)
> >                               return ret;
> >                       continue;
> >               }
> > +             put_page(page);
> 
> We keep the page count pinned after the isolation of the error page
> in order to make sure that the error page is disabled and never reused.
> This seems not explicit enough, so some comment should be helpful.

... but that comment was short on detail, sorry. More precisely, a refcount
incremented before calling memory_failure() is kept only when the error page
was in use as a normal LRU page when the error hit and the error was
successfully handled. The reason for this behavior (besides avoiding the risk
of unexpected reuse) is to make sure that unpoison (the cancellation mechanism
for hwpoison) can trigger the page-freeing code (__put_page() for normal pages).
But I think this tricky behavior exists for historical reasons and we could do
without it, so I don't think you have to inherit it in new code.

(Although I'm not familiar with dax,) if a dev_pagemap page has a different
lifecycle from that of a normal page and has its own mechanism for cancelling
a memory error, then you can simply release the page refcount at the end of
memory_failure_dev_pagemap(), as sketched below.
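
Something along these lines, for example (an untested sketch of the tail of
memory_failure_dev_pagemap(); it assumes the caller pinned the page before
calling memory_failure()):

	rc = 0;
out:
	unlock_page(page);
	/* hypothetical: drop the pin taken before memory_failure() was called */
	put_page(page);
	put_dev_pagemap(pgmap);
	return rc;
}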


I have another comment about the page refcount. memory_failure() is sometimes
called with the page pinned and sometimes without, as indicated by the
MF_COUNT_INCREASED flag. When MF_COUNT_INCREASED is not set, memory_failure()
tries to take the reference itself, so you might need some adjustment for the
non-MF_COUNT_INCREASED case, perhaps along the lines of the sketch below.
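
For example (untested, and the exact error handling is a guess):

	pgmap = get_dev_pagemap(pfn, NULL);
	if (pgmap) {
		/*
		 * Hypothetical adjustment: if the caller did not already pin
		 * the page, try to take the reference here so the dev_pagemap
		 * path always runs with the page pinned.
		 */
		if (!(flags & MF_COUNT_INCREASED) &&
		    !get_page_unless_zero(pfn_to_page(pfn))) {
			put_dev_pagemap(pgmap);
			return -EBUSY;
		}
		return memory_failure_dev_pagemap(pfn, flags, pgmap);
	}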

Thanks,
Naoya Horiguchi

> +	put_dev_pagemap(pgmap);
> +	return rc;
> +}
> +
>  /**
>   * memory_failure - Handle memory failure of a page.
>   * @pfn: Page Number of the corrupted page
> @@ -1134,6 +1246,7 @@ int memory_failure(unsigned long pfn, int flags)
>  	struct page *p;
>  	struct page *hpage;
>  	struct page *orig_head;
> +	struct dev_pagemap *pgmap;
>  	int res;
>  	unsigned long page_flags;
>  
> @@ -1146,6 +1259,10 @@ int memory_failure(unsigned long pfn, int flags)
>  		return -ENXIO;
>  	}
>  
> +	pgmap = get_dev_pagemap(pfn, NULL);
> +	if (pgmap)
> +		return memory_failure_dev_pagemap(pfn, flags, pgmap);
> +
>  	p = pfn_to_page(pfn);
>  	if (PageHuge(p))
>  		return memory_failure_hugetlb(pfn, flags);
> 
>

Patch

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 42a193ee14d3..f95036f99a79 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -55,6 +55,7 @@ 
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm_inline.h>
+#include <linux/memremap.h>
 #include <linux/kfifo.h>
 #include <linux/ratelimit.h>
 #include "internal.h"
@@ -1112,6 +1113,117 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
 	return res;
 }
 
+static unsigned long dax_mapping_size(struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+	pgoff_t pgoff = page_to_pgoff(page);
+	struct vm_area_struct *vma;
+	unsigned long size = 0;
+
+	i_mmap_lock_read(mapping);
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+		unsigned long address = vma_address(page, vma);
+		pgd_t *pgd;
+		p4d_t *p4d;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset(vma->vm_mm, address);
+		if (!pgd_present(*pgd))
+			continue;
+		p4d = p4d_offset(pgd, address);
+		if (!p4d_present(*p4d))
+			continue;
+		pud = pud_offset(p4d, address);
+		if (!pud_present(*pud))
+			continue;
+		if (pud_devmap(*pud)) {
+			size = PUD_SIZE;
+			break;
+		}
+		pmd = pmd_offset(pud, address);
+		if (!pmd_present(*pmd))
+			continue;
+		if (pmd_devmap(*pmd)) {
+			size = PMD_SIZE;
+			break;
+		}
+		pte = pte_offset_map(pmd, address);
+		if (!pte_present(*pte))
+			continue;
+		if (pte_devmap(*pte)) {
+			size = PAGE_SIZE;
+			break;
+		}
+	}
+	i_mmap_unlock_read(mapping);
+	return size;
+}
+
+static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
+		struct dev_pagemap *pgmap)
+{
+	struct page *page = pfn_to_page(pfn);
+	const bool unmap_success = true;
+	unsigned long size;
+	LIST_HEAD(tokill);
+	int rc = -EBUSY;
+	loff_t start;
+
+	lock_page(page);
+	if (hwpoison_filter(page)) {
+		rc = 0;
+		goto out;
+	}
+
+	switch (pgmap->type) {
+	case MEMORY_DEVICE_PRIVATE:
+	case MEMORY_DEVICE_PUBLIC:
+		/*
+		 * TODO: Handle HMM pages which may need coordination
+		 * with device-side memory.
+		 */
+		goto out;
+	default:
+		if (!page->mapping)
+			goto out;
+		break;
+	}
+
+	/*
+	 * If the page is not mapped in userspace then report it as
+	 * unhandled.
+	 */
+	size = dax_mapping_size(page);
+	if (!size) {
+		pr_err("Memory failure: %#lx: failed to unmap page\n", pfn);
+		goto out;
+	}
+
+	SetPageHWPoison(page);
+
+	/*
+	 * Unlike System-RAM there is no possibility to swap in a
+	 * different physical page at a given virtual address, so all
+	 * userspace consumption of ZONE_DEVICE memory necessitates
+	 * SIGBUS (i.e. MF_MUST_KILL)
+	 */
+	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+	collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
+
+	start = (page->index << PAGE_SHIFT) & ~(size - 1);
+	unmap_mapping_range(page->mapping, start, start + size, 0);
+
+	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, ilog2(size),
+			pfn, flags);
+	rc = 0;
+out:
+	unlock_page(page);
+	put_dev_pagemap(pgmap);
+	return rc;
+}
+
 /**
  * memory_failure - Handle memory failure of a page.
  * @pfn: Page Number of the corrupted page
@@ -1134,6 +1246,7 @@ int memory_failure(unsigned long pfn, int flags)
 	struct page *p;
 	struct page *hpage;
 	struct page *orig_head;
+	struct dev_pagemap *pgmap;
 	int res;
 	unsigned long page_flags;
 
@@ -1146,6 +1259,10 @@ int memory_failure(unsigned long pfn, int flags)
 		return -ENXIO;
 	}
 
+	pgmap = get_dev_pagemap(pfn, NULL);
+	if (pgmap)
+		return memory_failure_dev_pagemap(pfn, flags, pgmap);
+
 	p = pfn_to_page(pfn);
 	if (PageHuge(p))
 		return memory_failure_hugetlb(pfn, flags);