
[20/46] hugetlb: add HGM support for hugetlb_follow_page_mask

Message ID 20230105101844.1893104-21-jthoughton@google.com (mailing list archive)
State New
Series Based on latest mm-unstable (85b44c25cd1e).

Commit Message

James Houghton Jan. 5, 2023, 10:18 a.m. UTC
The change here is simple: instead of a single hugetlb_walk() at hugepage granularity, do a high-granularity walk with hugetlb_full_walk(), retrying the PTE lookup if the mapping is split out from under us.
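For reference, here is a minimal sketch of the new lookup flow, pulled together from the hunk below. It assumes the hugetlb_pte helpers introduced earlier in this series (hugetlb_full_walk(), hugetlb_pte_lock(), hugetlb_pte_present_leaf(), hugetlb_full_walk_continue()) behave as they are used in the patch; the function name hgm_lookup_page() is made up for illustration, and the FOLL_PIN check and page-reference grabbing that the real function keeps are omitted:

/*
 * Illustrative sketch only, not a drop-in replacement for
 * hugetlb_follow_page_mask(); hgm_lookup_page() is a hypothetical name.
 */
static struct page *hgm_lookup_page(struct vm_area_struct *vma,
				    unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct hugetlb_pte hpte;
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t entry;

	hugetlb_vma_lock_read(vma);

	/* Walk down to the lowest-level PTE mapping 'address'. */
	if (hugetlb_full_walk(&hpte, vma, address))
		goto out;

retry:
	ptl = hugetlb_pte_lock(&hpte);
	entry = huge_ptep_get(hpte.ptep);
	if (pte_present(entry)) {
		if (unlikely(!hugetlb_pte_present_leaf(&hpte, entry))) {
			/*
			 * The entry we locked was split under us; drop the
			 * lock and keep walking to reach the real leaf.
			 */
			spin_unlock(ptl);
			hugetlb_full_walk_continue(&hpte, vma, address);
			goto retry;
		}
		/* Offset into the compound page, as in the patch below. */
		page = pte_page(entry) +
			((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
	}
	spin_unlock(ptl);
out:
	hugetlb_vma_unlock_read(vma);
	return page;
}

The retry mirrors the comment in the patch: since a present entry may no longer be a leaf by the time its lock is taken, the walk is continued until the lock is held on an actual leaf.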

Signed-off-by: James Houghton <jthoughton@google.com>
---
 mm/hugetlb.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 30fea414d9ee..718572444a73 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6553,11 +6553,10 @@  struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 				unsigned long address, unsigned int flags)
 {
 	struct hstate *h = hstate_vma(vma);
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long haddr = address & huge_page_mask(h);
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	pte_t *pte, entry;
+	pte_t entry;
+	struct hugetlb_pte hpte;
 
 	/*
 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
@@ -6567,13 +6566,24 @@  struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 		return NULL;
 
 	hugetlb_vma_lock_read(vma);
-	pte = hugetlb_walk(vma, haddr, huge_page_size(h));
-	if (!pte)
+
+	if (hugetlb_full_walk(&hpte, vma, address))
 		goto out_unlock;
 
-	ptl = huge_pte_lock(h, mm, pte);
-	entry = huge_ptep_get(pte);
+retry:
+	ptl = hugetlb_pte_lock(&hpte);
+	entry = huge_ptep_get(hpte.ptep);
 	if (pte_present(entry)) {
+		if (unlikely(!hugetlb_pte_present_leaf(&hpte, entry))) {
+			/*
+			 * We raced with someone splitting from under us.
+			 * Keep walking to get to the real leaf.
+			 */
+			spin_unlock(ptl);
+			hugetlb_full_walk_continue(&hpte, vma, address);
+			goto retry;
+		}
+
 		page = pte_page(entry) +
 				((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
 		/*