
[v5,05/17] mm: Handle COW-ed PTE during zapping

Message ID 20230414142341.354556-6-shiyn.lin@gmail.com (mailing list archive)
State New
Series: Introduce Copy-On-Write to Page Table

Commit Message

Chih-En Lin April 14, 2023, 2:23 p.m. UTC
To support zap functionality for COW-ed PTEs, we need to zap the
entire PTE table each time instead of partially zapping pages.
Therefore, if the zap range covers the entire PTE table, we can
handle the de-accounting, remove the rmap, and so on. However, we
must not modify the entries while someone else still holds a
reference to the COW-ed PTE table. Conversely, if the zapping
process is the only one referencing this COW-ed PTE table, we simply
reuse it and do the normal zapping.

Signed-off-by: Chih-En Lin <shiyn.lin@gmail.com>
---
 mm/memory.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 87 insertions(+), 5 deletions(-)
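
At a glance, the decision this patch makes in zap_pte_range() for a
COW-shared PTE table (MMF_COW_PTE set and the pmd entry not writable)
can be summarized with the sketch below. This is a simplified,
illustrative sketch rather than the actual kernel code:
zap_entries_normally() and drop_shared_pte_table() are hypothetical
placeholders for the logic in the hunks that follow, while
break_cow_pte(), cow_pte_count() and range_in_vma() are the helpers
the series actually uses.

static void zap_cow_pte_sketch(struct vm_area_struct *vma, pmd_t *pmd,
			       unsigned long addr)
{
	unsigned long start = addr & PMD_MASK;
	unsigned long end = (addr + PMD_SIZE) & PMD_MASK;

	if (!range_in_vma(vma, start, end)) {
		/* The whole table cannot be zapped here: un-share it first. */
		break_cow_pte(vma, pmd, addr);
		zap_entries_normally(vma, pmd, addr);		/* placeholder */
	} else if (cow_pte_count(pmd) == 1) {
		/* We are the only user: take back write access and zap. */
		set_pmd_at(vma->vm_mm, start, pmd, pmd_mkwrite(*pmd));
		zap_entries_normally(vma, pmd, addr);		/* placeholder */
	} else {
		/*
		 * Still shared: leave the PTE entries untouched, drop our
		 * reference, clear our pmd entry and flush the TLB.  If our
		 * reference turns out to be the last one, the table is
		 * reused and freed instead (see the end of the last hunk).
		 */
		drop_shared_pte_table(vma, pmd, start, end);	/* placeholder */
	}
}

The shared case is the interesting one: the PTE entries themselves are
left intact for the other mappers, and only this mm's pmd entry and
reference to the table are dropped.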

Patch

diff --git a/mm/memory.c b/mm/memory.c
index f8a87a0fc382..7908e20f802a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -192,6 +192,12 @@  static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+#ifdef CONFIG_COW_PTE
+		if (test_bit(MMF_COW_PTE, &tlb->mm->flags)) {
+			if (!pmd_none(*pmd) && !pmd_write(*pmd))
+				VM_WARN_ON(cow_pte_count(pmd) != 1);
+		}
+#endif
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		free_pte_range(tlb, pmd, addr);
@@ -1656,6 +1662,7 @@  zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
 
 #define ZAP_PTE_INIT 0x0000
 #define ZAP_PTE_FORCE_FLUSH 0x0001
+#define ZAP_PTE_IS_SHARED 0x0002
 
 struct zap_pte_details {
 	pte_t **pte;
@@ -1681,9 +1688,13 @@  zap_present_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (unlikely(!should_zap_page(details, page)))
 		return 0;
 
-	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+	if (pte_details->flags & ZAP_PTE_IS_SHARED)
+		ptent = ptep_get(pte);
+	else
+		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 	tlb_remove_tlb_entry(tlb, pte, addr);
-	zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+	if (!(pte_details->flags & ZAP_PTE_IS_SHARED))
+		zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
 	if (unlikely(!page))
 		return 0;
 
@@ -1767,8 +1778,10 @@  zap_nopresent_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		/* We should have covered all the swap entry types */
 		WARN_ON_ONCE(1);
 	}
-	pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
-	zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+	if (!(pte_details->flags & ZAP_PTE_IS_SHARED)) {
+		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+		zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+	}
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -1785,6 +1798,36 @@  static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		.flags = ZAP_PTE_INIT,
 		.pte = &pte,
 	};
+#ifdef CONFIG_COW_PTE
+	unsigned long orig_addr = addr;
+
+	if (test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd)) {
+		if (!range_in_vma(vma, addr & PMD_MASK,
+				  (addr + PMD_SIZE) & PMD_MASK)) {
+			/*
+			 * We cannot guarantee that this COW-ed PTE table will
+			 * also be zapped along with the rest of the VMAs, so
+			 * break the COW PTE here.
+			 */
+			break_cow_pte(vma, pmd, addr);
+		} else {
+			/*
+			 * Free the batched memory before handling the
+			 * COW-ed PTE table.
+			 */
+			tlb_flush_mmu(tlb);
+			end = (addr + PMD_SIZE) & PMD_MASK;
+			addr = addr & PMD_MASK;
+			start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+			if (cow_pte_count(pmd) == 1) {
+				/* Reuse COW-ed PTE */
+				pmd_t new = pmd_mkwrite(*pmd);
+				set_pmd_at(tlb->mm, addr, pmd, new);
+			} else
+				pte_details.flags |= ZAP_PTE_IS_SHARED;
+			pte_unmap_unlock(start_pte, ptl);
+		}
+	}
+#endif
 
 	tlb_change_page_size(tlb, PAGE_SIZE);
 again:
@@ -1828,7 +1871,16 @@  static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	 */
 	if (pte_details.flags & ZAP_PTE_FORCE_FLUSH) {
 		pte_details.flags &= ~ZAP_PTE_FORCE_FLUSH;
-		tlb_flush_mmu(tlb);
+		/*
+		 * With a COW-ed PTE table, we defer freeing the batched
+		 * memory until after we have actually cleared the COW-ed
+		 * PTE table's pmd entry. Otherwise, if we are the only ones
+		 * still referencing the COW-ed PTE table after we have freed
+		 * the batched memory, the page table check will report a bug
+		 * with anon_map_count != 0 in page_table_check_zero().
+		 */
+		if (!(pte_details.flags & ZAP_PTE_IS_SHARED))
+			tlb_flush_mmu(tlb);
 	}
 
 	if (addr != end) {
@@ -1836,6 +1888,36 @@  static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		goto again;
 	}
 
+#ifdef CONFIG_COW_PTE
+	if (pte_details.flags & ZAP_PTE_IS_SHARED) {
+		start_pte = pte_offset_map_lock(mm, pmd, orig_addr, &ptl);
+		if (!pmd_put_pte(pmd)) {
+			pmd_t new = pmd_mkwrite(*pmd);
+			set_pmd_at(tlb->mm, addr, pmd, new);
+			/*
+			 * We are the only ones still referencing this table.
+			 * Clear the page table check before we free the
+			 * batched memory.
+			 */
+			page_table_check_pte_clear_range(mm, orig_addr, *pmd);
+			pte_unmap_unlock(start_pte, ptl);
+			/* free the batched memory and flush the TLB. */
+			tlb_flush_mmu(tlb);
+			free_pte_range(tlb, pmd, addr);
+		} else {
+			pmd_clear(pmd);
+			pte_unmap_unlock(start_pte, ptl);
+			mm_dec_nr_ptes(tlb->mm);
+			/*
+			 * Someone else is still referencing the table,
+			 * so we just flush the TLB here.
+			 */
+			flush_tlb_range(vma, addr & PMD_MASK,
+					(addr + PMD_SIZE) & PMD_MASK);
+		}
+	}
+#endif
+
 	return addr;
 }