[v2,2/2] mm/khugepaged: add case for mapping anonymous pte-mapped THPs by pmds

Message ID 94575367298bc13d36937f15ac2cd1652684272f.1701917546.git.xuyu@linux.alibaba.com (mailing list archive)
State New
Series: attempt to map anonymous pte-mapped THPs by pmds

Commit Message

Xu Yu Dec. 7, 2023, 3:09 a.m. UTC
This adds another case, pointed out by David, which is suitable for
mapping anonymous pte-mapped THPs by pmds: when all subpages are
PageAnonExclusive (the PTEs may be either R/O or R/W), we can clear
PageAnonExclusive on all tail pages, keep it on the first (head) page,
and collapse to a R/W PMD if the VMA has VM_WRITE, or to a R/O PMD
otherwise.

Signed-off-by: Xu Yu <xuyu@linux.alibaba.com>
---
 mm/khugepaged.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 85c7a2ab44ce..5c51b09cb291 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1366,8 +1366,14 @@  static int collapse_pte_mapped_anon_thp(struct mm_struct *mm,
 	 * Case 1:
 	 * No subpages are PageAnonExclusive (PTEs must be R/O), we can
 	 * collapse into a R/O PMD without further action.
+	 *
+	 * Case 2:
+	 * All subpages are PageAnonExclusive (PTEs may be either R/O or R/W),
+	 * we clear PageAnonExclusive on all tail pages but the head page and
+	 * collapse to a R/W PMD with VM_WRITE or a R/O PMD without VM_WRITE.
 	 */
-	if (!(exclusive == 0 && !writable))
+	if (!((exclusive == 0 && !writable) ||
+	      (exclusive == HPAGE_PMD_NR)))
 		goto drop_hpage;
 
 	/* Collapse pmd entry */
@@ -1396,12 +1402,21 @@  static int collapse_pte_mapped_anon_thp(struct mm_struct *mm,
 
 		page = vm_normal_page(vma, addr, pteval);
 		page_remove_rmap(page, vma, false);
+
+		if (exclusive == HPAGE_PMD_NR)
+			ClearPageAnonExclusive(page);
 	}
 	pte_unmap_unlock(start_pte, ptl);
 
 	/* Install pmd entry */
 	pgtable = pmd_pgtable(pmdval);
 	pmdval = mk_huge_pmd(hpage, vma->vm_page_prot);
+
+	if (exclusive == HPAGE_PMD_NR) {
+		SetPageAnonExclusive(hpage);
+		pmdval = maybe_pmd_mkwrite(pmd_mkdirty(pmdval), vma);
+	}
+
 	spin_lock(pml);
 	page_add_anon_rmap(hpage, vma, haddr, RMAP_COMPOUND);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
@@ -1596,7 +1611,9 @@  static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
 
-	if (is_hpage && (exclusive == 0 && !writable)) {
+	if (is_hpage &&
+	    ((exclusive == 0 && !writable) ||
+	     (exclusive == HPAGE_PMD_NR))) {
 		int res;
 
 		res = collapse_pte_mapped_anon_thp(mm, vma, address,