[resend,2/2] mm: always use base address when copy gigantic page

Message ID 20241025004456.3435808-2-wangkefeng.wang@huawei.com (mailing list archive)
State New
Series [resend,1/2] mm: always use base address when clear gigantic page

Commit Message

Kefeng Wang Oct. 25, 2024, 12:44 a.m. UTC
When copying a gigantic page, the copy proceeds from the first subpage
to the last, so the aligned base address of the folio is needed there.
Do the alignment inside copy_user_gigantic_page() instead of in the
callers, so that the real faulting address is passed unmodified to
process_huge_page() on the non-gigantic path.
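For context, this is roughly how copy_user_large_folio() dispatches
between the two paths; a simplified sketch based on the shape of
mm/memory.c around this series, not the verbatim source:

int copy_user_large_folio(struct folio *dst, struct folio *src,
			  unsigned long addr_hint, struct vm_area_struct *vma)
{
	unsigned int nr_pages = folio_nr_pages(dst);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	/* Gigantic folios copy front to back; the callee now derives
	 * the folio base address from addr_hint itself. */
	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);

	/* Smaller huge folios: process_huge_page() wants the real
	 * faulting address so the faulting subpage is copied last and
	 * stays cache-hot. */
	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}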

Fixes: 530dd9926dc1 ("mm: memory: improve copy_user_large_folio()")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/hugetlb.c | 5 ++---
 mm/memory.c  | 1 +
 2 files changed, 3 insertions(+), 3 deletions(-)

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 906294ac85dc..2674dba12c73 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5338,7 +5338,7 @@  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					break;
 				}
 				ret = copy_user_large_folio(new_folio, pte_folio,
-						ALIGN_DOWN(addr, sz), dst_vma);
+							    addr, dst_vma);
 				folio_put(pte_folio);
 				if (ret) {
 					folio_put(new_folio);
@@ -6637,8 +6637,7 @@  int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		ret = copy_user_large_folio(folio, *foliop,
-					    ALIGN_DOWN(dst_addr, size), dst_vma);
+		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 		if (ret) {
diff --git a/mm/memory.c b/mm/memory.c
index 934ab5fff537..281c0460c572 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6841,6 +6841,7 @@  static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
 	struct page *dst_page;
 	struct page *src_page;
 
+	addr = ALIGN_DOWN(addr, folio_size(dst));
 	for (i = 0; i < nr_pages; i++) {
 		dst_page = folio_page(dst, i);
 		src_page = folio_page(src, i);
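The hunk above is truncated at the loop head; with the fix applied, the
whole function looks roughly like this (a sketch reconstructed from the
diff and the surrounding mm/memory.c code; the MC-safe copy and the
error value are assumptions drawn from the pre-existing function body):

static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
				   unsigned long addr,
				   struct vm_area_struct *vma,
				   unsigned int nr_pages)
{
	struct page *dst_page;
	struct page *src_page;
	int i;

	/* The fix: compute the folio base address once, here ... */
	addr = ALIGN_DOWN(addr, folio_size(dst));

	for (i = 0; i < nr_pages; i++) {
		dst_page = folio_page(dst, i);
		src_page = folio_page(src, i);

		cond_resched();
		/* ... so subpage i is copied at base + i * PAGE_SIZE. */
		if (copy_mc_user_highpage(dst_page, src_page,
					  addr + i * PAGE_SIZE, vma))
			return -EHWPOISON;
	}

	return 0;
}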