
[v4,4/5] rmap: add folio_remove_rmap_range()

Message ID 20230313124526.1207490-5-fengwei.yin@intel.com (mailing list archive)
State New
Series batched remove rmap in try_to_unmap_one()

Commit Message

Yin, Fengwei March 13, 2023, 12:45 p.m. UTC
folio_remove_rmap_range() allows taking down the pte mappings of
a specific range of pages within a folio. Compared to
page_remove_rmap(), it batches the __lruvec_stat updates for
large folios.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 include/linux/rmap.h |  4 +++
 mm/rmap.c            | 58 +++++++++++++++++++++++++++++++++-----------
 2 files changed, 48 insertions(+), 14 deletions(-)
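
For context, the intended call pattern looks roughly like this. This is
a hedged sketch, not code from the series: unmap_folio_range() is a
hypothetical caller, and it assumes the pte lock is already held, as
the kernel-doc below requires.

static void unmap_folio_range(struct folio *folio, struct page *first,
			      unsigned int nr, struct vm_area_struct *vma)
{
	/*
	 * One call tears down the pte mappings of nr contiguous
	 * pages of the folio; the per-node stats are then adjusted
	 * once for the whole range instead of once per page.
	 */
	folio_remove_rmap_range(folio, first, nr, vma, false);
}

The old equivalent is nr separate page_remove_rmap() calls, each of
which updates __lruvec_stat on its own.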

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b87d01660412..d2569b42e21a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -200,6 +200,10 @@  void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
+void folio_remove_rmap_range(struct folio *, struct page *,
+			unsigned int nr_pages, struct vm_area_struct *,
+			bool compound);
+
 
 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
diff --git a/mm/rmap.c b/mm/rmap.c
index 72fc8c559cd9..bd5331dc9d44 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1355,23 +1355,25 @@  void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
 }
 
 /**
- * page_remove_rmap - take down pte mapping from a page
- * @page:	page to remove mapping from
+ * folio_remove_rmap_range - take down pte mappings from a range of pages
+ * @folio:	folio to remove mapping from
+ * @page:	the first page in the range to take down the pte mapping of
+ * @nr_pages:	the number of pages whose pte mappings will be taken down
  * @vma:	the vm area from which the mapping is removed
  * @compound:	uncharge the page as compound or small page
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
-		bool compound)
+void folio_remove_rmap_range(struct folio *folio, struct page *page,
+			unsigned int nr_pages, struct vm_area_struct *vma,
+			bool compound)
 {
-	struct folio *folio = page_folio(page);
 	atomic_t *mapped = &folio->_nr_pages_mapped;
-	int nr = 0, nr_pmdmapped = 0;
-	bool last;
+	int nr = 0, nr_pmdmapped = 0, last;
 	enum node_stat_item idx;
 
-	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
+	VM_BUG_ON_FOLIO(compound && (nr_pages != folio_nr_pages(folio)), folio);
+	VM_BUG_ON_FOLIO(compound && (page != &folio->page), folio);
 
 	/* Hugetlb pages are not counted in NR_*MAPPED */
 	if (unlikely(folio_test_hugetlb(folio))) {
@@ -1382,12 +1384,16 @@  void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 
 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
 	if (likely(!compound)) {
-		last = atomic_add_negative(-1, &page->_mapcount);
-		nr = last;
-		if (last && folio_test_large(folio)) {
-			nr = atomic_dec_return_relaxed(mapped);
-			nr = (nr < COMPOUND_MAPPED);
-		}
+		do {
+			last = atomic_add_negative(-1, &page->_mapcount);
+			if (last && folio_test_large(folio)) {
+				last = atomic_dec_return_relaxed(mapped);
+				last = (last < COMPOUND_MAPPED);
+			}
+
+			if (last)
+				nr++;
+		} while (page++, --nr_pages > 0);
 	} else if (folio_test_pmd_mappable(folio)) {
 		/* That test is redundant: it's for safety or to optimize out */
 
@@ -1441,6 +1447,30 @@  void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 	munlock_vma_folio(folio, vma, compound);
 }
 
+/**
+ * page_remove_rmap - take down pte mapping from a page
+ * @page:	page to remove mapping from
+ * @vma:	the vm area from which the mapping is removed
+ * @compound:	uncharge the page as compound or small page
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
+		bool compound)
+{
+	struct folio *folio = page_folio(page);
+	unsigned int nr_pages;
+
+	VM_BUG_ON_FOLIO(compound && (page != &folio->page), folio);
+
+	if (likely(!compound))
+		nr_pages = 1;
+	else
+		nr_pages = folio_nr_pages(folio);
+
+	folio_remove_rmap_range(folio, page, nr_pages, vma, compound);
+}
+
 static bool try_to_unmap_one_hugetlb(struct folio *folio,
 		struct vm_area_struct *vma, struct mmu_notifier_range range,
 		struct page_vma_mapped_walk pvmw, unsigned long address,
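
The tail of the rewritten function is not visible in this hunk, but it
is where the batching named in the commit message happens: the do-while
loop above only accumulates, in nr, how many pages dropped to a zero
mapcount, and the stat adjustment is then applied once for the whole
range. A sketch of that step, modeled on the existing
page_remove_rmap() logic rather than quoted from this patch:

	/* After the per-page loop: one batched stat update for the range. */
	if (nr) {
		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
		__lruvec_stat_mod_folio(folio, idx, -nr);
	}

This also explains why the enum node_stat_item idx declaration survives
in the new function even though no stat update appears in the loop
itself.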