
[v2,4/5] rmap: add folio_remove_rmap_range()

Message ID 20230228122308.2972219-5-fengwei.yin@intel.com (mailing list archive)
State New
Series batched remove rmap in try_to_unmap_one()

Commit Message

Yin Fengwei Feb. 28, 2023, 12:23 p.m. UTC
folio_remove_rmap_range() allows taking down the pte mappings of a
specific range of pages within a folio. Compared to page_remove_rmap(),
it batches the __lruvec_stat updates for large folios.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 include/linux/rmap.h |  4 +++
 mm/rmap.c            | 58 +++++++++++++++++++++++++++++++++-----------
 2 files changed, 48 insertions(+), 14 deletions(-)
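
For context, a minimal caller sketch (not part of this patch) of how code
that has already cleared the PTEs for a contiguous run of pages in a folio
might use the new interface instead of calling page_remove_rmap() once per
page. The function name and the assumption that the range is already known
are illustrative only; folio_remove_rmap_range(), page_remove_rmap() and
folio_test_large() are the interfaces from this series and mainline:

/*
 * Hypothetical caller, for illustration only.  Assumes the pte lock is
 * held and the PTEs for first_page..first_page + nr_pages - 1 have
 * already been cleared.
 */
static void example_remove_range_rmap(struct folio *folio,
		struct page *first_page, unsigned int nr_pages,
		struct vm_area_struct *vma)
{
	if (folio_test_large(folio) && nr_pages > 1) {
		/*
		 * One call still decrements _mapcount per page, but folds
		 * the __lruvec_stat update into a single adjustment.
		 */
		folio_remove_rmap_range(folio, first_page, nr_pages,
					vma, false);
	} else {
		/* Single page or small folio: equivalent to the old path. */
		page_remove_rmap(first_page, vma, false);
	}
}

Compared with looping over page_remove_rmap(), the batched call avoids one
__lruvec_stat update per page, which is the gain the commit message claims
for large folios.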

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b87d01660412..d2569b42e21a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -200,6 +200,10 @@  void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
+void folio_remove_rmap_range(struct folio *, struct page *,
+			unsigned int nr_pages, struct vm_area_struct *,
+			bool compound);
+
 
 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
diff --git a/mm/rmap.c b/mm/rmap.c
index d243e557c6e4..fc02a8f9c59c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1357,23 +1357,25 @@  void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
 }
 
 /**
- * page_remove_rmap - take down pte mapping from a page
- * @page:	page to remove mapping from
+ * folio_remove_rmap_range - take down pte mapping from a range of pages
+ * @folio:	folio to remove mapping from
+ * @page:	The first page to remove the pte mapping from
+ * @nr_pages:	The number of pages that will have their pte mappings removed
  * @vma:	the vm area from which the mapping is removed
  * @compound:	uncharge the page as compound or small page
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
-		bool compound)
+void folio_remove_rmap_range(struct folio *folio, struct page *page,
+			unsigned int nr_pages, struct vm_area_struct *vma,
+			bool compound)
 {
-	struct folio *folio = page_folio(page);
 	atomic_t *mapped = &folio->_nr_pages_mapped;
-	int nr = 0, nr_pmdmapped = 0;
-	bool last;
+	int nr = 0, nr_pmdmapped = 0, last;
 	enum node_stat_item idx;
 
-	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
+	VM_BUG_ON_FOLIO(compound && (nr_pages != folio_nr_pages(folio)), folio);
+	VM_BUG_ON_FOLIO(compound && (page != &folio->page), folio);
 
 	/* Hugetlb pages are not counted in NR_*MAPPED */
 	if (unlikely(folio_test_hugetlb(folio))) {
@@ -1384,12 +1386,16 @@  void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 
 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
 	if (likely(!compound)) {
-		last = atomic_add_negative(-1, &page->_mapcount);
-		nr = last;
-		if (last && folio_test_large(folio)) {
-			nr = atomic_dec_return_relaxed(mapped);
-			nr = (nr < COMPOUND_MAPPED);
-		}
+		do {
+			last = atomic_add_negative(-1, &page->_mapcount);
+			if (last && folio_test_large(folio)) {
+				last = atomic_dec_return_relaxed(mapped);
+				last = (last < COMPOUND_MAPPED);
+			}
+
+			if (last)
+				nr++;
+		} while (page++, --nr_pages > 0);
 	} else if (folio_test_pmd_mappable(folio)) {
 		/* That test is redundant: it's for safety or to optimize out */
 
@@ -1443,6 +1449,30 @@  void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 	munlock_vma_folio(folio, vma, compound);
 }
 
+/**
+ * page_remove_rmap - take down pte mapping from a page
+ * @page:	page to remove mapping from
+ * @vma:	the vm area from which the mapping is removed
+ * @compound:	uncharge the page as compound or small page
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
+		bool compound)
+{
+	struct folio *folio = page_folio(page);
+	unsigned int nr_pages;
+
+	VM_BUG_ON_FOLIO(compound && (page != &folio->page), folio);
+
+	if (likely(!compound))
+		nr_pages = 1;
+	else
+		nr_pages = folio_nr_pages(folio);
+
+	folio_remove_rmap_range(folio, page, nr_pages, vma, compound);
+}
+
 static bool try_to_unmap_one_hugetlb(struct folio *folio,
 		struct vm_area_struct *vma, struct mmu_notifier_range range,
 		struct page_vma_mapped_walk pvmw, unsigned long address,