@@ -1394,6 +1394,32 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
nr_pages, vma, compound);
}

+static void folio_remove_entire_rmap(struct folio *folio,
+ int *nr, int *nr_pmdmapped)
+{
+ bool last;
+ atomic_t *mapped = &folio->_nr_pages_mapped;
+
+ last = atomic_add_negative(-1, &folio->_entire_mapcount);
+ if (last) {
+ *nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
+ if (likely(*nr < COMPOUND_MAPPED)) {
+ *nr_pmdmapped = folio_nr_pages(folio);
+ *nr = *nr_pmdmapped - (*nr & FOLIO_PAGES_MAPPED);
+
+ /* Raced ahead of another remove and an add? */
+ if (unlikely(*nr < 0))
+ *nr = 0;
+ } else {
+ /* An add of COMPOUND_MAPPED raced ahead */
+ *nr = 0;
+ }
+ }
+
+ if (!folio_test_pmd_mappable(folio))
+ *nr_pmdmapped = 0;
+}
+
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
@@ -1431,20 +1457,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
} else if (folio_test_pmd_mappable(folio)) {
/* That test is redundant: it's for safety or to optimize out */

- last = atomic_add_negative(-1, &folio->_entire_mapcount);
- if (last) {
- nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
- if (likely(nr < COMPOUND_MAPPED)) {
- nr_pmdmapped = folio_nr_pages(folio);
- nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
- /* Raced ahead of another remove and an add? */
- if (unlikely(nr < 0))
- nr = 0;
- } else {
- /* An add of COMPOUND_MAPPED raced ahead */
- nr = 0;
- }
- }
+ folio_remove_entire_rmap(folio, &nr, &nr_pmdmapped);
}
if (nr_pmdmapped) {
Add folio_remove_entire_rmap(). It handles removing the rmap of an
entire (PMD-mapped) folio.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 mm/rmap.c | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)
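
For reference, a rough userspace model of the _nr_pages_mapped
accounting the new helper relies on: the counter's low bits track
PTE-mapped pages (masked by FOLIO_PAGES_MAPPED), and a COMPOUND_MAPPED
bias is added once per entire (PMD) mapping. struct demo_folio,
demo_remove_entire() and the sample values are made-up stand-ins for
this sketch; C11 atomics replace the kernel's atomic_add_negative()
and atomic_sub_return_relaxed(), and the constants mirror
mm/internal.h:

/*
 * Userspace sketch of the entire-rmap removal accounting.
 * Everything here is illustrative, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define COMPOUND_MAPPED		0x800000	/* mirrors mm/internal.h */
#define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)

struct demo_folio {
	atomic_int entire_mapcount;	/* -1 means no entire mapping */
	atomic_int nr_pages_mapped;	/* PTE count + COMPOUND_MAPPED bias */
	int nr_pages;			/* folio_nr_pages() stand-in */
};

static void demo_remove_entire(struct demo_folio *f,
			       int *nr, int *nr_pmdmapped)
{
	/* atomic_add_negative(-1, ...): true when the new value is negative */
	if (atomic_fetch_sub(&f->entire_mapcount, 1) - 1 < 0) {
		/* atomic_sub_return_relaxed(): new value after subtracting */
		*nr = atomic_fetch_sub(&f->nr_pages_mapped, COMPOUND_MAPPED)
			- COMPOUND_MAPPED;
		if (*nr < COMPOUND_MAPPED) {
			/* unmapped pages = whole folio minus still-PTE-mapped */
			*nr_pmdmapped = f->nr_pages;
			*nr = *nr_pmdmapped - (*nr & FOLIO_PAGES_MAPPED);
			if (*nr < 0)	/* raced ahead of a remove and an add */
				*nr = 0;
		} else {
			*nr = 0;	/* an add of COMPOUND_MAPPED raced ahead */
		}
	}
}

int main(void)
{
	/* one entire mapping (mapcount 0) plus 3 PTEs of a 512-page folio */
	struct demo_folio f = { 0, COMPOUND_MAPPED + 3, 512 };
	int nr = 0, nr_pmdmapped = 0;

	demo_remove_entire(&f, &nr, &nr_pmdmapped);
	printf("nr=%d nr_pmdmapped=%d\n", nr, nr_pmdmapped);
	/* prints: nr=509 nr_pmdmapped=512 */
	return 0;
}

The model omits the helper's trailing folio_test_pmd_mappable() check;
presumably that check is there so a later caller can use the helper
without the pmd-mappable guard page_remove_rmap() already has.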