
[v4] mm: hwpoison: support recovery from ksm_might_need_to_copy()

Message ID 20230201074433.96641-1-wangkefeng.wang@huawei.com (mailing list archive)
State New
Series [v4] mm: hwpoison: support recovery from ksm_might_need_to_copy()

Commit Message

Kefeng Wang Feb. 1, 2023, 7:44 a.m. UTC
When the kernel copies a page in ksm_might_need_to_copy() but runs
into an uncorrectable error, it will crash, since the poisoned page
is consumed by the kernel. This is similar to the issue recently
fixed by "Copy-on-write poison recovery".

When an error is detected during the page copy, return
VM_FAULT_HWPOISON in do_swap_page(), and install a hwpoison entry in
unuse_pte() during swapoff, which helps us avoid a system crash. Note
that memory failure on a KSM page will be skipped; we still call
memory_failure_queue() to be consistent with the general memory
failure process, and KSM page recovery could be supported in the
future.
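
For reference, callers of ksm_might_need_to_copy() must now tell an
allocation failure (NULL) apart from a poisoned source page
(ERR_PTR(-EHWPOISON)). A minimal sketch of the caller-side pattern,
mirroring the unuse_pte() hunk below (the hwpoisoned flag is local to
the caller):

	page = ksm_might_need_to_copy(page, vma, addr);
	if (unlikely(!page))		/* copy allocation failed */
		return -ENOMEM;
	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
		hwpoisoned = true;	/* poison hit during the copy */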

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
v4:
- update the changelog and directly return ERR_PTR(-EHWPOISON) in
  ksm_might_need_to_copy(), as suggested by HORIGUCHI NAOYA
- add back unlikely() in unuse_pte()

 mm/ksm.c      |  7 +++++--
 mm/memory.c   |  3 +++
 mm/swapfile.c | 20 ++++++++++++++------
 3 files changed, 22 insertions(+), 8 deletions(-)

Comments

Andrew Morton Feb. 4, 2023, 1:46 a.m. UTC | #1
On Wed, 1 Feb 2023 15:44:33 +0800 Kefeng Wang <wangkefeng.wang@huawei.com> wrote:

> When the kernel copies a page in ksm_might_need_to_copy() but runs
> into an uncorrectable error, it will crash, since the poisoned page
> is consumed by the kernel. This is similar to the issue recently
> fixed by "Copy-on-write poison recovery".
> 
> When an error is detected during the page copy, return
> VM_FAULT_HWPOISON in do_swap_page(), and install a hwpoison entry in
> unuse_pte() during swapoff, which helps us avoid a system crash. Note
> that memory failure on a KSM page will be skipped; we still call
> memory_failure_queue() to be consistent with the general memory
> failure process, and KSM page recovery could be supported in the
> future.
> 

Some review input would be helpful.

Are we able to identify a Fixes: target for this?

I assume that a -stable backport is desirable?

Thanks.

> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -2629,8 +2629,11 @@ struct page *ksm_might_need_to_copy(struct page *page,
>  		new_page = NULL;
>  	}
>  	if (new_page) {
> -		copy_user_highpage(new_page, page, address, vma);
> -
> +		if (copy_mc_user_highpage(new_page, page, address, vma)) {
> +			put_page(new_page);
> +			memory_failure_queue(page_to_pfn(page), 0);
> +			return ERR_PTR(-EHWPOISON);
> +		}
>  		SetPageDirty(new_page);
>  		__SetPageUptodate(new_page);
>  		__SetPageLocked(new_page);
> diff --git a/mm/memory.c b/mm/memory.c
> index aad226daf41b..5b2c137dfb2a 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3840,6 +3840,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		if (unlikely(!page)) {
>  			ret = VM_FAULT_OOM;
>  			goto out_page;
> +		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
> +			ret = VM_FAULT_HWPOISON;
> +			goto out_page;
>  		}
>  		folio = page_folio(page);
>  
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 908a529bca12..3ef2468d7130 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1763,12 +1763,15 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
>  	struct page *swapcache;
>  	spinlock_t *ptl;
>  	pte_t *pte, new_pte;
> +	bool hwpoisoned = false;
>  	int ret = 1;
>  
>  	swapcache = page;
>  	page = ksm_might_need_to_copy(page, vma, addr);
>  	if (unlikely(!page))
>  		return -ENOMEM;
> +	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
> +		hwpoisoned = true;
>  
>  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
>  	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
> @@ -1776,15 +1779,19 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
>  		goto out;
>  	}
>  
> -	if (unlikely(!PageUptodate(page))) {
> -		pte_t pteval;
> +	if (unlikely(hwpoisoned || !PageUptodate(page))) {
> +		swp_entry_t swp_entry;
>  
>  		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> -		pteval = swp_entry_to_pte(make_swapin_error_entry());
> -		set_pte_at(vma->vm_mm, addr, pte, pteval);
> -		swap_free(entry);
> +		if (hwpoisoned) {
> +			swp_entry = make_hwpoison_entry(swapcache);
> +			page = swapcache;
> +		} else {
> +			swp_entry = make_swapin_error_entry();
> +		}
> +		new_pte = swp_entry_to_pte(swp_entry);
>  		ret = 0;
> -		goto out;
> +		goto setpte;
>  	}
>  
>  	/* See do_swap_page() */
> @@ -1816,6 +1823,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
>  		new_pte = pte_mksoft_dirty(new_pte);
>  	if (pte_swp_uffd_wp(*pte))
>  		new_pte = pte_mkuffd_wp(new_pte);
> +setpte:
>  	set_pte_at(vma->vm_mm, addr, pte, new_pte);
>  	swap_free(entry);
>  out:
> -- 
> 2.35.3
>
Naoya Horiguchi Feb. 7, 2023, 10:38 p.m. UTC | #2
On Wed, Feb 01, 2023 at 03:44:33PM +0800, Kefeng Wang wrote:
> When the kernel copies a page in ksm_might_need_to_copy() but runs
> into an uncorrectable error, it will crash, since the poisoned page
> is consumed by the kernel. This is similar to the issue recently
> fixed by "Copy-on-write poison recovery".
> 
> When an error is detected during the page copy, return
> VM_FAULT_HWPOISON in do_swap_page(), and install a hwpoison entry in
> unuse_pte() during swapoff, which helps us avoid a system crash. Note
> that memory failure on a KSM page will be skipped; we still call
> memory_failure_queue() to be consistent with the general memory
> failure process, and KSM page recovery could be supported in the
> future.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Looks good to me, thank you.

Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>

Patch

diff --git a/mm/ksm.c b/mm/ksm.c
index dd02780c387f..addf490da146 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2629,8 +2629,11 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		new_page = NULL;
 	}
 	if (new_page) {
-		copy_user_highpage(new_page, page, address, vma);
-
+		if (copy_mc_user_highpage(new_page, page, address, vma)) {
+			put_page(new_page);
+			memory_failure_queue(page_to_pfn(page), 0);
+			return ERR_PTR(-EHWPOISON);
+		}
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
 		__SetPageLocked(new_page);
diff --git a/mm/memory.c b/mm/memory.c
index aad226daf41b..5b2c137dfb2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3840,6 +3840,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		if (unlikely(!page)) {
 			ret = VM_FAULT_OOM;
 			goto out_page;
+		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+			ret = VM_FAULT_HWPOISON;
+			goto out_page;
 		}
 		folio = page_folio(page);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 908a529bca12..3ef2468d7130 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1763,12 +1763,15 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte;
+	bool hwpoisoned = false;
 	int ret = 1;
 
 	swapcache = page;
 	page = ksm_might_need_to_copy(page, vma, addr);
 	if (unlikely(!page))
 		return -ENOMEM;
+	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+		hwpoisoned = true;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
@@ -1776,15 +1779,19 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto out;
 	}
 
-	if (unlikely(!PageUptodate(page))) {
-		pte_t pteval;
+	if (unlikely(hwpoisoned || !PageUptodate(page))) {
+		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
-		pteval = swp_entry_to_pte(make_swapin_error_entry());
-		set_pte_at(vma->vm_mm, addr, pte, pteval);
-		swap_free(entry);
+		if (hwpoisoned) {
+			swp_entry = make_hwpoison_entry(swapcache);
+			page = swapcache;
+		} else {
+			swp_entry = make_swapin_error_entry();
+		}
+		new_pte = swp_entry_to_pte(swp_entry);
 		ret = 0;
-		goto out;
+		goto setpte;
 	}
 
 	/* See do_swap_page() */
@@ -1816,6 +1823,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		new_pte = pte_mksoft_dirty(new_pte);
 	if (pte_swp_uffd_wp(*pte))
 		new_pte = pte_mkuffd_wp(new_pte);
+setpte:
 	set_pte_at(vma->vm_mm, addr, pte, new_pte);
 	swap_free(entry);
 out:
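
For context, the hwpoison entry installed above is not handled by this
patch itself; on the next access to that address, the existing
non-swap-entry path in do_swap_page() catches it. Roughly, simplified
from mainline (elisions ours):

	entry = pte_to_swp_entry(vmf->orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry)) {
			/* wait for migration to finish, then retry */
		} else if (is_hwpoison_entry(entry)) {
			/* poisoned page: SIGBUS the task, no kernel crash */
			ret = VM_FAULT_HWPOISON;
		}
		goto out;
	}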