
[v4,5/6] fs: hugetlbfs: support poison recover from hugetlbfs_migrate_folio()

Message ID: 20240603092439.3360652-6-wangkefeng.wang@huawei.com
State: New
Series: mm: migrate: support poison recover from migrate folio

Commit Message

Kefeng Wang June 3, 2024, 9:24 a.m. UTC
Similar to __migrate_folio(), use folio_mc_copy() in HugeTLB folio
migration to avoid a panic when copying from a poisoned folio.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 fs/hugetlbfs/inode.c |  2 +-
 mm/migrate.c         | 14 +++++++++-----
 2 files changed, 10 insertions(+), 6 deletions(-)
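
[Editor's note] For context, the helper relied on here, folio_mc_copy(), is added earlier in this series as a machine-check-safe variant of folio_copy(). Below is a minimal sketch of such a helper, assuming it is built on the existing per-page copy_mc_highpage() and reports consumed poison as -EHWPOISON; the actual implementation in the series may differ in details.

#include <linux/highmem.h>	/* copy_mc_highpage() */
#include <linux/mm.h>		/* folio helpers */
#include <linux/sched.h>	/* cond_resched() */

/*
 * Sketch only: a machine-check-safe folio copy in the spirit of the
 * folio_mc_copy() helper added earlier in this series.  Unlike
 * folio_copy(), hitting poison in the source page is reported to the
 * caller instead of panicking the kernel.
 */
static int folio_mc_copy_sketch(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		/* copy_mc_highpage() returns non-zero if poison was hit */
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		/* copying a huge folio can take a while; this may sleep */
		cond_resched();
	}

	return 0;
}

Because such a copy may sleep for large folios, the mm/migrate.c hunk below moves it ahead of xas_lock_irq(): the copy runs while the source refcount is frozen but before interrupts are disabled.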

Comments

Jane Chu June 6, 2024, 11:30 p.m. UTC | #1
On 6/3/2024 2:24 AM, Kefeng Wang wrote:

> Similar to __migrate_folio(), use folio_mc_copy() in HugeTLB folio
> migration to avoid a panic when copying from a poisoned folio.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>   fs/hugetlbfs/inode.c |  2 +-
>   mm/migrate.c         | 14 +++++++++-----
>   2 files changed, 10 insertions(+), 6 deletions(-)
>
> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> index 6df794ed4066..1107e5aa8343 100644
> --- a/fs/hugetlbfs/inode.c
> +++ b/fs/hugetlbfs/inode.c
> @@ -1128,7 +1128,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
>   		hugetlb_set_folio_subpool(src, NULL);
>   	}
>   
> -	folio_migrate_copy(dst, src);
> +	folio_migrate_flags(dst, src);
>   
>   	return MIGRATEPAGE_SUCCESS;
>   }
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 28aa9da95781..e9b52a86f539 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -532,15 +532,19 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
>   				   struct folio *dst, struct folio *src)
>   {
>   	XA_STATE(xas, &mapping->i_pages, folio_index(src));
> -	int expected_count;
> +	int ret, expected_count = folio_expected_refs(mapping, src);
>   
> -	xas_lock_irq(&xas);
> -	expected_count = folio_expected_refs(mapping, src);
> -	if (!folio_ref_freeze(src, expected_count)) {
> -		xas_unlock_irq(&xas);
> +	if (!folio_ref_freeze(src, expected_count))
>   		return -EAGAIN;
> +
> +	ret = folio_mc_copy(dst, src);
> +	if (unlikely(ret)) {
> +		folio_ref_unfreeze(src, expected_count);
> +		return ret;
>   	}
>   
> +	xas_lock_irq(&xas);
> +
>   	dst->index = src->index;
>   	dst->mapping = src->mapping;
>   

Looks good!

Reviewed-by: Jane Chu <jane.chu@oracle.com>

-jane

Patch

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6df794ed4066..1107e5aa8343 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1128,7 +1128,7 @@  static int hugetlbfs_migrate_folio(struct address_space *mapping,
 		hugetlb_set_folio_subpool(src, NULL);
 	}
 
-	folio_migrate_copy(dst, src);
+	folio_migrate_flags(dst, src);
 
 	return MIGRATEPAGE_SUCCESS;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 28aa9da95781..e9b52a86f539 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -532,15 +532,19 @@  int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct folio *dst, struct folio *src)
 {
 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
-	int expected_count;
+	int ret, expected_count = folio_expected_refs(mapping, src);
 
-	xas_lock_irq(&xas);
-	expected_count = folio_expected_refs(mapping, src);
-	if (!folio_ref_freeze(src, expected_count)) {
-		xas_unlock_irq(&xas);
+	if (!folio_ref_freeze(src, expected_count))
 		return -EAGAIN;
+
+	ret = folio_mc_copy(dst, src);
+	if (unlikely(ret)) {
+		folio_ref_unfreeze(src, expected_count);
+		return ret;
 	}
 
+	xas_lock_irq(&xas);
+
 	dst->index = src->index;
 	dst->mapping = src->mapping;
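
[Editor's note] For reference, a sketch of how the hugetlbfs migrate callback reads with this patch applied, reconstructed from the fs/hugetlbfs/inode.c hunk above; lines outside the hunk context are assumptions about the surrounding function. The data copy now happens inside migrate_huge_page_move_mapping() via folio_mc_copy(), so the callback only transfers flags afterwards.

/*
 * Sketch reconstructed from the hunk above; not a verbatim copy of
 * the file, only the lines shown in the hunk are known exactly.
 */
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				   struct folio *dst, struct folio *src,
				   enum migrate_mode mode)
{
	int rc;

	/* Freezes src, does the MC-safe data copy, then moves the mapping. */
	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst, hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	/* Data was already copied above, so only migrate flags here. */
	folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}

Reordering folio_mc_copy() ahead of xas_lock_irq() in migrate_huge_page_move_mapping() also keeps the potentially long, possibly sleeping copy of a huge folio out of the interrupts-disabled section; freezing the refcount first is what keeps the source stable while the copy runs.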