[36/46] hugetlb: remove huge_pte_lock and huge_pte_lockptr

Message ID: 20230105101844.1893104-37-jthoughton@google.com
State: New
Series: Based on latest mm-unstable (85b44c25cd1e).

Commit Message

James Houghton Jan. 5, 2023, 10:18 a.m. UTC
They are replaced with hugetlb_pte_lock{,ptr}. Any callers that have
not already been converted are never called when HGM is in use, so we
handle them by populating their hugetlb_ptes with the standard,
hstate-sized huge PTEs.

Signed-off-by: James Houghton <jthoughton@google.com>
---
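Notes (illustration only, not part of the patch): the conversion
pattern for the remaining callers is sketched below, using hypothetical
locals h, mm, and ptep, and assuming hugetlb_pte_lock() simply locks
and returns hpte->ptl, mirroring the huge_pte_lock() removed here.

	struct hugetlb_pte hpte;
	spinlock_t *ptl;

	/* Before: lock the hstate-sized huge PTE directly. */
	ptl = huge_pte_lock(h, mm, ptep);
	/* ... access the huge PTE ... */
	spin_unlock(ptl);

	/*
	 * After: describe the PTE with a hugetlb_pte first, then lock
	 * it. hpage_size_to_level() picks the hugetlb_level that
	 * matches the hstate-sized (non-HGM) huge PTE.
	 */
	hugetlb_pte_populate(mm, &hpte, ptep, huge_page_shift(h),
			     hpage_size_to_level(huge_page_size(h)));
	ptl = hugetlb_pte_lock(&hpte);
	/* ... access the huge PTE ... */
	spin_unlock(ptl);
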
 arch/powerpc/mm/pgtable.c |  7 +++++--
 include/linux/hugetlb.h   | 42 +++++++++++++++------------------------
 mm/hugetlb.c              | 22 +++++++++++++-------
 3 files changed, 36 insertions(+), 35 deletions(-)

Patch

diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 035a0df47af0..e20d6aa9a2a6 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -258,11 +258,14 @@  int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		struct hstate *h = hstate_vma(vma);
+		struct hugetlb_pte hpte;
 
 		psize = hstate_get_psize(h);
 #ifdef CONFIG_DEBUG_VM
-		assert_spin_locked(huge_pte_lockptr(huge_page_shift(h),
-						    vma->vm_mm, ptep));
+		/* HGM is not supported for powerpc yet. */
+		hugetlb_pte_populate(vma->vm_mm, &hpte, ptep, huge_page_shift(h),
+				hpage_size_to_level(huge_page_size(h)));
+		assert_spin_locked(hpte.ptl);
 #endif
 
 #else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e1baf939afb6..4d318bf2ced9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1032,14 +1032,6 @@  static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 	return modified_mask;
 }
 
-static inline spinlock_t *huge_pte_lockptr(unsigned int shift,
-					   struct mm_struct *mm, pte_t *pte)
-{
-	if (shift == PMD_SHIFT)
-		return pmd_lockptr(mm, (pmd_t *) pte);
-	return &mm->page_table_lock;
-}
-
 #ifndef hugepages_supported
 /*
  * Some platform decide whether they support huge pages at boot
@@ -1248,12 +1240,6 @@  static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 	return 0;
 }
 
-static inline spinlock_t *huge_pte_lockptr(unsigned int shift,
-					   struct mm_struct *mm, pte_t *pte)
-{
-	return &mm->page_table_lock;
-}
-
 static inline void hugetlb_count_init(struct mm_struct *mm)
 {
 }
@@ -1328,16 +1314,6 @@  int hugetlb_collapse(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 #endif
 
-static inline spinlock_t *huge_pte_lock(struct hstate *h,
-					struct mm_struct *mm, pte_t *pte)
-{
-	spinlock_t *ptl;
-
-	ptl = huge_pte_lockptr(huge_page_shift(h), mm, pte);
-	spin_lock(ptl);
-	return ptl;
-}
-
 static inline
 spinlock_t *hugetlb_pte_lockptr(struct hugetlb_pte *hpte)
 {
@@ -1358,8 +1334,22 @@  void hugetlb_pte_populate(struct mm_struct *mm, struct hugetlb_pte *hpte,
 			  pte_t *ptep, unsigned int shift,
 			  enum hugetlb_level level)
 {
-	__hugetlb_pte_populate(hpte, ptep, shift, level,
-			huge_pte_lockptr(shift, mm, ptep));
+	spinlock_t *ptl;
+
+	/*
+	 * For contiguous HugeTLB PTEs that can contain other HugeTLB PTEs
+	 * on the same level, the same PTL must be used for both.
+	 *
+	 * For some architectures that implement hugetlb_walk_step, this
+	 * version of hugetlb_pte_populate() may not be correct to use for
+	 * high-granularity PTEs. Instead, call __hugetlb_pte_populate()
+	 * directly.
+	 */
+	if (level == HUGETLB_LEVEL_PMD)
+		ptl = pmd_lockptr(mm, (pmd_t *) ptep);
+	else
+		ptl = &mm->page_table_lock;
+	__hugetlb_pte_populate(hpte, ptep, shift, level, ptl);
 }
 
 #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 388c46c7e77a..d71adc03138d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5303,9 +5303,8 @@  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				put_page(hpage);
 
 				/* Install the new huge page if src pte stable */
-				dst_ptl = huge_pte_lock(h, dst, dst_pte);
-				src_ptl = huge_pte_lockptr(huge_page_shift(h),
-							   src, src_pte);
+				dst_ptl = hugetlb_pte_lock(&dst_hpte);
+				src_ptl = hugetlb_pte_lockptr(&src_hpte);
 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 				entry = huge_ptep_get(src_pte);
 				if (!pte_same(src_pte_old, entry)) {
@@ -7383,7 +7382,8 @@  pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long saddr;
 	pte_t *spte = NULL;
 	pte_t *pte;
-	spinlock_t *ptl;
+	struct hugetlb_pte hpte;
+	struct hstate *shstate;
 
 	i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
@@ -7404,7 +7404,11 @@  pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!spte)
 		goto out;
 
-	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
+	shstate = hstate_vma(svma);
+
+	hugetlb_pte_populate(mm, &hpte, spte, huge_page_shift(shstate),
+			hpage_size_to_level(huge_page_size(shstate)));
+	spin_lock(hpte.ptl);
 	if (pud_none(*pud)) {
 		pud_populate(mm, pud,
 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
@@ -7412,7 +7416,7 @@  pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else {
 		put_page(virt_to_page(spte));
 	}
-	spin_unlock(ptl);
+	spin_unlock(hpte.ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
 	i_mmap_unlock_read(mapping);
@@ -8132,6 +8136,7 @@  void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	unsigned long address, start, end;
 	spinlock_t *ptl;
 	pte_t *ptep;
+	struct hugetlb_pte hpte;
 
 	if (!(vma->vm_flags & VM_MAYSHARE))
 		return;
@@ -8156,7 +8161,10 @@  void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 		ptep = hugetlb_walk(vma, address, sz);
 		if (!ptep)
 			continue;
-		ptl = huge_pte_lock(h, mm, ptep);
+
+		hugetlb_pte_populate(mm, &hpte, ptep, huge_page_shift(h),
+				     hpage_size_to_level(sz));
+		ptl = hugetlb_pte_lock(&hpte);
 		huge_pmd_unshare(mm, vma, address, ptep);
 		spin_unlock(ptl);
 	}
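
The hugetlb_pte_lock()/hugetlb_pte_lockptr() helpers this patch
switches to are introduced earlier in the series, so their bodies are
not visible here. From the hpte.ptl uses above, they presumably reduce
to something like the following sketch (the PTL is chosen once when
the hugetlb_pte is populated and cached in it):

	static inline spinlock_t *hugetlb_pte_lockptr(struct hugetlb_pte *hpte)
	{
		/* Return the lock cached at populate time. */
		return hpte->ptl;
	}

	static inline spinlock_t *hugetlb_pte_lock(struct hugetlb_pte *hpte)
	{
		spinlock_t *ptl = hugetlb_pte_lockptr(hpte);

		spin_lock(ptl);
		return ptl;
	}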