
[RFC,v2,17/30] mm: thp: PUD THP COW splits PUD page and falls back to PMD page.

Message ID: 20200928175428.4110504-18-zi.yan@sent.com
Series: 1GB PUD THP support on x86_64

Commit Message

Zi Yan Sept. 28, 2020, 5:54 p.m. UTC
From: Zi Yan <ziy@nvidia.com>

To avoid high COW overhead, COW on a PUD THP behaves like COW on a PMD
THP: instead of copying the whole huge page, the PUD mapping is split
and the fault falls back to the PMD level. As a result,
do_huge_pmd_wp_page() will see PMD-mapped PUD THPs, so PUD mappings
need to be counted in the total mapcount when reuse_swap_page() calls
page_trans_huge_map_swapcount(); otherwise a remaining PUD mapping in
another process would be missed and the page wrongly reported as
exclusively mapped (a false positive). Change
page_trans_huge_map_swapcount() to get this right.
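
As an editorial illustration (not part of the patch), the userspace
access pattern below is one way to reach this COW path: fault in a
1GB-aligned anonymous region, fork, and write from the child. Whether
the region is actually backed by a PUD THP depends on the allocation
policy added earlier in the series; the snippet only shows the
triggering accesses.

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define PUD_SIZE (1UL << 30)	/* 1GB, the PUD size on x86_64 */

int main(void)
{
	char *buf = aligned_alloc(PUD_SIZE, PUD_SIZE);

	if (!buf)
		return 1;
	madvise(buf, PUD_SIZE, MADV_HUGEPAGE);	/* opt in to THP */
	memset(buf, 1, PUD_SIZE);		/* fault the region in */

	if (fork() == 0) {
		buf[0] = 2;	/* child write fault takes the COW path */
		_exit(0);
	}
	wait(NULL);
	free(buf);
	return 0;
}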

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/huge_mm.h |  5 +++++
 mm/huge_memory.c        | 13 +++++++++++++
 mm/memory.c             |  3 +--
 mm/swapfile.c           |  7 ++++++-
 4 files changed, 25 insertions(+), 3 deletions(-)

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e5c68e680907..589e5af5a1c2 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -19,6 +19,7 @@  extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
 extern int do_huge_pud_anonymous_page(struct vm_fault *vmf);
+extern vm_fault_t do_huge_pud_wp_page(struct vm_fault *vmf, pud_t orig_pud);
 #else
 static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 {
@@ -27,6 +28,10 @@  extern int do_huge_pud_anonymous_page(struct vm_fault *vmf)
 {
 	return VM_FAULT_FALLBACK;
 }
+static inline vm_fault_t do_huge_pud_wp_page(struct vm_fault *vmf, pud_t orig_pud)
+{
+	return VM_FAULT_FALLBACK;
+}
 #endif
 
 extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4a899e856088..9aa19aa643cd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1335,6 +1335,19 @@  void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 unlock:
 	spin_unlock(vmf->ptl);
 }
+
+vm_fault_t do_huge_pud_wp_page(struct vm_fault *vmf, pud_t orig_pud)
+{
+	struct vm_area_struct *vma = vmf->vma;
+
+	/*
+	 * Split the PUD mapping directly: a whole PUD page is not swappable,
+	 * so there is no need to try reuse_swap_page().
+	 */
+	__split_huge_pud(vma, vmf->pud, vmf->address, false, NULL);
+	return VM_FAULT_FALLBACK;
+}
+
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
diff --git a/mm/memory.c b/mm/memory.c
index e0e0459c0caf..ab80d13807aa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4141,9 +4141,8 @@  static vm_fault_t create_huge_pud(struct vm_fault *vmf)
 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	/* No support for anonymous transparent PUD pages yet */
 	if (vma_is_anonymous(vmf->vma))
-		return VM_FAULT_FALLBACK;
+		return do_huge_pud_wp_page(vmf, orig_pud);
 	if (vmf->vma->vm_ops->huge_fault)
 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 495ecdbd7859..a6989b0c4d44 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1635,7 +1635,12 @@  static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
 	/* hugetlbfs shouldn't call it */
 	VM_BUG_ON_PAGE(PageHuge(page), page);
 
-	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
+	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)) ||
+	    /*
+	     * A PMD-mapped PUD THP needs to take PUD mappings into account
+	     * by using page_trans_huge_mapcount().
+	     */
+	    unlikely(thp_order(page) == HPAGE_PUD_ORDER)) {
 		mapcount = page_trans_huge_mapcount(page, total_mapcount);
 		if (PageSwapCache(page))
 			swapcount = page_swapcount(page);
 		if (total_swapcount)
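
To make the "false positive" in the commit message concrete, below is a
tiny standalone model (an editorial sketch with made-up counts, not
kernel code). After one process COWs and splits its copy of the
mapping, the page can be PMD-mapped there while still PUD-mapped in
another process; a reuse check that ignored PUD mappings would wrongly
treat the page as exclusively mapped.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the reuse decision: a COW'ed page may be reused in
 * place only if it has exactly one mapping.  A PUD THP can be
 * PUD-mapped in one process and PMD-mapped in another, so both levels
 * must be summed, as page_trans_huge_mapcount() is expected to do.
 */
static bool can_reuse(int pud_mapcount, int pmd_mapcount)
{
	return pud_mapcount + pmd_mapcount == 1;
}

int main(void)
{
	/* e.g. parent still maps via a PUD; child has split to PMDs */
	printf("count both levels: reuse=%d\n", can_reuse(1, 1));	/* 0 */
	printf("ignore PUD level:  reuse=%d\n", can_reuse(0, 1));	/* 1 */
	return 0;
}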