[RFC,10/12] mm/gup: Handle huge pmd for follow_pmd_mask()

Message ID: 20231116012908.392077-11-peterx@redhat.com
State: New
Series: mm/gup: Unify hugetlb, part 2

Commit Message

Peter Xu Nov. 16, 2023, 1:29 a.m. UTC
Replace pmd_trans_huge() with pmd_thp_or_huge() so that the checks also
cover pmd_huge() entries whenever hugetlb is enabled.
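For reference, pmd_thp_or_huge() comes from earlier in this series; the
assumed shape of the helper is a simple OR of the two checks:

	/* Sketch of the helper added earlier in the series (assumption). */
	#define pmd_thp_or_huge(pmd)	(pmd_trans_huge(pmd) || pmd_huge(pmd))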

FOLL_TOUCH and FOLL_SPLIT_PMD only apply to THPs, not to hugetlb pmds, so
both of those paths are now gated on pmd_trans_huge().

Since follow_trans_huge_pmd() can now handle hugetlb pages, rename it to
follow_huge_pmd() to match what it does.

While at it, move the ctx->page_mask setup into follow_huge_pmd(), setting
it only when the page is valid.  Setting it earlier was not a bug even when
GUP failed (page==NULL), because follow_page_mask() callers always ignore
page_mask in that case; moving it just makes the code cleaner.
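
For context, ctx->page_mask is consumed in __get_user_pages() roughly as
below, letting GUP advance over all remaining subpages of a huge mapping
without re-walking the page tables:

	/* Existing consumer side, quoted approximately: */
	page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);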

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 mm/gup.c         | 12 ++++++------
 mm/huge_memory.c | 19 ++++++++++---------
 mm/internal.h    |  6 +++---
 3 files changed, 19 insertions(+), 18 deletions(-)

Patch

diff --git a/mm/gup.c b/mm/gup.c
index 55a2ae55f00f..7c210206470f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -713,31 +713,31 @@  static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 		return page;
 	}
-	if (likely(!pmd_trans_huge(pmdval)))
+	if (likely(!pmd_thp_or_huge(pmdval)))
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
 	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
 		return no_page_table(vma, flags, address);
 
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_present(*pmd))) {
+	pmdval = *pmd;
+	if (unlikely(!pmd_present(pmdval))) {
 		spin_unlock(ptl);
 		return no_page_table(vma, flags, address);
 	}
-	if (unlikely(!pmd_trans_huge(*pmd))) {
+	if (unlikely(!pmd_thp_or_huge(pmdval))) {
 		spin_unlock(ptl);
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	if (flags & FOLL_SPLIT_PMD) {
+	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
 		spin_unlock(ptl);
 		split_huge_pmd(vma, pmd, address);
 		/* If pmd was left empty, stuff a page table in there quickly */
 		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	page = follow_trans_huge_pmd(vma, address, pmd, flags);
+	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
 	spin_unlock(ptl);
-	ctx->page_mask = HPAGE_PMD_NR - 1;
 	return page;
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6748ef5f3fd9..43fb81218c5e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1486,32 +1486,32 @@  static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
 	return !userfaultfd_huge_pmd_wp(vma, pmd);
 }
 
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-				   unsigned long addr,
-				   pmd_t *pmd,
-				   unsigned int flags)
+struct page *follow_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
+			     pmd_t *pmd, unsigned int flags,
+			     struct follow_page_context *ctx)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	pmd_t pmdval = *pmd;
 	struct page *page;
 	int ret;
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
-	page = pmd_page(*pmd);
+	page = pmd_page(pmdval);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 
 	if ((flags & FOLL_WRITE) &&
-	    !can_follow_write_pmd(*pmd, page, vma, flags))
+	    !can_follow_write_pmd(pmdval, page, vma, flags))
 		return NULL;
 
 	/* Avoid dumping huge zero page */
-	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
 		return ERR_PTR(-EFAULT);
 
 	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
 		return NULL;
 
-	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
+	if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
 		return ERR_PTR(-EMLINK);
 
 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
@@ -1521,10 +1521,11 @@  struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (flags & FOLL_TOUCH)
+	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
 		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
 
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+	ctx->page_mask = HPAGE_PMD_NR - 1;
 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
 
 	return page;
diff --git a/mm/internal.h b/mm/internal.h
index 8450562744cf..bf0dc896c274 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1007,9 +1007,9 @@  int __must_check try_grab_page(struct page *page, unsigned int flags);
 /*
  * mm/huge_memory.c
  */
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-				   unsigned long addr, pmd_t *pmd,
-				   unsigned int flags);
+struct page *follow_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
+			     pmd_t *pmd, unsigned int flags,
+			     struct follow_page_context *ctx);
 
 /*
  * mm/mmap.c