diff mbox series

[v2,2/5] mm/hugetlb: use PTE page lock to protect CONT-PTE entries

Message ID 064489292e6e224ef4406af990c7cdc3c054ca77.1661240170.git.baolin.wang@linux.alibaba.com (mailing list archive)
State New
Headers show
Series Fix some issues when looking up hugetlb page | expand

Commit Message

Baolin Wang Aug. 23, 2022, 7:50 a.m. UTC
Considering that the pte entries of a CONT-PTE hugetlb can not span
multiple PTE page tables, we can change to use the PTE page lock, which
is much finer grained than the lock in the mm.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 include/linux/hugetlb.h | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d491138..4b172a7 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -892,9 +892,23 @@  static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					   struct mm_struct *mm, pte_t *pte)
 {
-	if (huge_page_size(h) == PMD_SIZE)
-		return pmd_lockptr(mm, (pmd_t *) pte);
 	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+
+	if (huge_page_size(h) == PMD_SIZE) {
+		return pmd_lockptr(mm, (pmd_t *) pte);
+	} else if (huge_page_size(h) < PMD_SIZE) {
+		unsigned long mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
+		struct page *page =
+			virt_to_page((void *)((unsigned long)pte & mask));
+
+		/*
+		 * Considering CONT-PTE size hugetlb, since the CONT-PTE
+		 * entry can not span multiple PTEs, we can use the PTE
+		 * page lock to get a fine grained lock.
+		 */
+		return ptlock_ptr(page);
+	}
+
 	return &mm->page_table_lock;
 }