[v2,3/5] rmap: cleanup exit path of try_to_unmap_one_page()

Message ID: 20230228122308.2972219-4-fengwei.yin@intel.com
State: New
Series: batched remove rmap in try_to_unmap_one()

Commit Message

Yin Fengwei Feb. 28, 2023, 12:23 p.m. UTC
Clean up the exit path of try_to_unmap_one_page() by removing
some duplicated code.
Move page_vma_mapped_walk_done() back to try_to_unmap_one().
Rename subpage to page, as a folio has no concept of a subpage.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 mm/rmap.c | 74 ++++++++++++++++++++++---------------------------------
 1 file changed, 30 insertions(+), 44 deletions(-)
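
The core of the cleanup, stripped of the kernel context: five early-failure
exits, four of which must re-install the cleared pte, collapse into two shared
labels (exit_restore_pte: and exit:), page_vma_mapped_walk_done() moves to the
caller, and the rss decrement moves to the single discard: success exit. A
minimal standalone sketch of that control-flow shape (plain C, compilable with
gcc; the helpers and scenarios below are invented stand-ins for the real
failure conditions, not kernel APIs):

/*
 * Sketch of the exit-path shape this patch converges on.  Illustrative
 * only: restore_pte()/drop_rss_counter() stand in for set_pte_at() and
 * dec_mm_counter(); the scenarios stand in for swap_duplicate(),
 * arch_unmap_one(), etc. failing.
 */
#include <stdbool.h>
#include <stdio.h>

static void restore_pte(void)      { puts("set_pte_at(): cleared pte re-installed"); }
static void drop_rss_counter(void) { puts("dec_mm_counter(): rss lowered once"); }

static bool unmap_one_page_sketch(int scenario)
{
	if (scenario == 1)		/* e.g. the WARN_ON_ONCE swapbacked check */
		goto exit;		/* pte already handled; just report failure */
	if (scenario == 2)		/* e.g. swap_duplicate() failing */
		goto exit_restore_pte;	/* cleared pte must be put back */

	drop_rss_counter();		/* the patch's discard: label, done once */
	return true;

exit_restore_pte:
	restore_pte();
exit:
	return false;
}

int main(void)
{
	for (int s = 0; s <= 2; s++)
		printf("scenario %d -> %s\n", s,
		       unmap_one_page_sketch(s) ? "unmapped" : "failed; caller runs walk_done()");
	return 0;
}

The payoff is that each failure site shrinks to a single goto, and the
per-exit state the old code threaded through the local ret disappears.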
Patch

diff --git a/mm/rmap.c b/mm/rmap.c
index 987ab402392f..d243e557c6e4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1530,7 +1530,7 @@  static bool try_to_unmap_one_hugetlb(struct folio *folio,
 	 *
 	 * See Documentation/mm/mmu_notifier.rst
 	 */
-	page_remove_rmap(&folio->page, vma, folio_test_hugetlb(folio));
+	page_remove_rmap(&folio->page, vma, true);
 	/* No VM_LOCKED set in vma->vm_flags for hugetlb. So not
 	 * necessary to call mlock_drain_local().
 	 */
@@ -1545,15 +1545,13 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		struct page_vma_mapped_walk pvmw, unsigned long address,
 		enum ttu_flags flags)
 {
-	bool anon_exclusive, ret = true;
-	struct page *subpage;
+	bool anon_exclusive;
+	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t pteval;
 
-	subpage = folio_page(folio,
-			pte_pfn(*pvmw.pte) - folio_pfn(folio));
-	anon_exclusive = folio_test_anon(folio) &&
-		PageAnonExclusive(subpage);
+	page = folio_page(folio, pte_pfn(*pvmw.pte) - folio_pfn(folio));
+	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
 
 	flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 	/* Nuke the page table entry. */
@@ -1581,15 +1579,14 @@  static bool try_to_unmap_one_page(struct folio *folio,
 	pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
 
 	/* Set the dirty flag on the folio now the pte is gone. */
-	if (pte_dirty(pteval))
+	if (pte_dirty(pteval) && !folio_test_dirty(folio))
 		folio_mark_dirty(folio);
 
 	/* Update high watermark before we lower rss */
 	update_hiwater_rss(mm);
 
-	if (PageHWPoison(subpage) && !(flags & TTU_HWPOISON)) {
-		pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
-		dec_mm_counter(mm, mm_counter(&folio->page));
+	if (PageHWPoison(page) && !(flags & TTU_HWPOISON)) {
+		pteval = swp_entry_to_pte(make_hwpoison_entry(page));
 		set_pte_at(mm, address, pvmw.pte, pteval);
 	} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
 		/*
@@ -1602,12 +1599,11 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		 * migration) will not expect userfaults on already
 		 * copied pages.
 		 */
-		dec_mm_counter(mm, mm_counter(&folio->page));
 		/* We have to invalidate as we cleared the pte */
 		mmu_notifier_invalidate_range(mm, address,
 				address + PAGE_SIZE);
 	} else if (folio_test_anon(folio)) {
-		swp_entry_t entry = { .val = page_private(subpage) };
+		swp_entry_t entry = { .val = page_private(page) };
 		pte_t swp_pte;
 		/*
 		 * Store the swap location in the pte.
@@ -1616,12 +1612,10 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		if (unlikely(folio_test_swapbacked(folio) !=
 					folio_test_swapcache(folio))) {
 			WARN_ON_ONCE(1);
-			ret = false;
 			/* We have to invalidate as we cleared the pte */
 			mmu_notifier_invalidate_range(mm, address,
 					address + PAGE_SIZE);
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit;
 		}
 
 		/* MADV_FREE page check */
@@ -1653,7 +1647,6 @@  static bool try_to_unmap_one_page(struct folio *folio,
 				/* Invalidate as we cleared the pte */
 				mmu_notifier_invalidate_range(mm,
 						address, address + PAGE_SIZE);
-				dec_mm_counter(mm, MM_ANONPAGES);
 				goto discard;
 			}
 
@@ -1661,43 +1654,30 @@  static bool try_to_unmap_one_page(struct folio *folio,
 			 * If the folio was redirtied, it cannot be
 			 * discarded. Remap the page to page table.
 			 */
-			set_pte_at(mm, address, pvmw.pte, pteval);
 			folio_set_swapbacked(folio);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
 
-		if (swap_duplicate(entry) < 0) {
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
-		}
+		if (swap_duplicate(entry) < 0)
+			goto exit_restore_pte;
+
 		if (arch_unmap_one(mm, vma, address, pteval) < 0) {
 			swap_free(entry);
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
 
 		/* See page_try_share_anon_rmap(): clear PTE first. */
-		if (anon_exclusive &&
-				page_try_share_anon_rmap(subpage)) {
+		if (anon_exclusive && page_try_share_anon_rmap(page)) {
 			swap_free(entry);
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
+
 		if (list_empty(&mm->mmlist)) {
 			spin_lock(&mmlist_lock);
 			if (list_empty(&mm->mmlist))
 				list_add(&mm->mmlist, &init_mm.mmlist);
 			spin_unlock(&mmlist_lock);
 		}
-		dec_mm_counter(mm, MM_ANONPAGES);
 		inc_mm_counter(mm, MM_SWAPENTS);
 		swp_pte = swp_entry_to_pte(entry);
 		if (anon_exclusive)
@@ -1708,8 +1688,7 @@  static bool try_to_unmap_one_page(struct folio *folio,
 			swp_pte = pte_swp_mkuffd_wp(swp_pte);
 		set_pte_at(mm, address, pvmw.pte, swp_pte);
 		/* Invalidate as we cleared the pte */
-		mmu_notifier_invalidate_range(mm, address,
-				address + PAGE_SIZE);
+		mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);
 	} else {
 		/*
 		 * This is a locked file-backed folio,
@@ -1722,11 +1701,16 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		dec_mm_counter(mm, mm_counter_file(&folio->page));
 	}
 
 discard:
-	return ret;
+	dec_mm_counter(vma->vm_mm, mm_counter(&folio->page));
+	return true;
+
+exit_restore_pte:
+	set_pte_at(mm, address, pvmw.pte, pteval);
+exit:
+	return false;
 }
 
 /*
@@ -1804,8 +1788,10 @@  static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 					pte_pfn(*pvmw.pte) - folio_pfn(folio));
 		ret = try_to_unmap_one_page(folio, vma,
 						range, pvmw, address, flags);
-		if (!ret)
+		if (!ret) {
+			page_vma_mapped_walk_done(&pvmw);
 			break;
+		}
 
 		/*
 		 * No need to call mmu_notifier_invalidate_range() it has be
@@ -1814,7 +1800,7 @@  static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+		page_remove_rmap(subpage, vma, false);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
 		folio_put(folio);
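
A closing note on the accounting consolidation above: all five per-branch rss
decrements the patch deletes (the hwpoison and pte_unused cases, MM_ANONPAGES
twice, and mm_counter_file() once) funnel into one
dec_mm_counter(vma->vm_mm, mm_counter(&folio->page)) at the shared discard:
label. That only works because mm_counter() already dispatches to the right
rss counter per page type. A hedged standalone paraphrase of that dispatch
(modeled on include/linux/mm.h of this era; the stub struct is illustrative,
not a kernel type):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the MM_* rss counter indices from the kernel's mm_types_task.h. */
enum rss_counter { MM_FILEPAGES, MM_ANONPAGES, MM_SWAPENTS, MM_SHMEMPAGES };

struct page_stub { bool anon; bool swapbacked; };	/* illustrative only */

/* Paraphrase of mm_counter()/mm_counter_file() around v6.2. */
static enum rss_counter mm_counter_sketch(const struct page_stub *page)
{
	if (page->anon)
		return MM_ANONPAGES;	/* covers the deleted MM_ANONPAGES decrements */
	if (page->swapbacked)
		return MM_SHMEMPAGES;	/* shmem is counted apart from plain files */
	return MM_FILEPAGES;		/* covers the deleted mm_counter_file() call */
}

int main(void)
{
	struct page_stub anon = { .anon = true };
	struct page_stub shmem = { .swapbacked = true };
	struct page_stub file = { 0 };

	printf("anon=%d shmem=%d file=%d\n",
	       mm_counter_sketch(&anon), mm_counter_sketch(&shmem),
	       mm_counter_sketch(&file));
	return 0;
}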