@@ -1568,12 +1568,11 @@ static unsigned int shrink_page_list(struct list_head *page_list,
stat->nr_unqueued_dirty += nr_pages;
/*
- * Treat this page as congested if the underlying BDI is or if
+ * Treat this page as congested if
* pages are cycling through the LRU so quickly that the
* pages marked for immediate reclaim are making it to the
* end of the LRU a second time.
*/
- mapping = page_mapping(page);
if (writeback && PageReclaim(page))
stat->nr_congested += nr_pages;
@@ -1725,9 +1724,6 @@ static unsigned int shrink_page_list(struct list_head *page_list,
}
may_enter_fs = true;
-
- /* Adding to swap updated mapping */
- mapping = page_mapping(page);
}
} else if (PageSwapBacked(page) && PageTransHuge(page)) {
/* Split shmem THP */
@@ -1768,6 +1764,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
}
}
+ mapping = folio_mapping(folio);
if (folio_test_dirty(folio)) {
/*
* Only kswapd can writeback filesystem folios
Now that we don't interrogate the BDI for congestion, we can delay
looking up the folio's mapping until we've got further through the
function, reducing register pressure and saving a call to folio_mapping
for folios we're adding to the swap cache.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/vmscan.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
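
[Illustrative note, not part of the patch] A minimal userspace sketch of
the pattern the diff applies: rather than fetching the mapping early and
refreshing it after a swap-cache insertion changes it, the single lookup
is deferred to the first point the mapping is actually used, so the value
does not have to stay live across the code in between. All names here
(struct obj, lookup_mapping(), add_to_swap(), writeback_one(),
reclaim_one()) are hypothetical stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stddef.h>

struct mapping { int id; };

struct obj {
	struct mapping *m;
	bool anon;
	bool dirty;
};

/* Stand-in for folio_mapping(): returns whatever the object points at now. */
static struct mapping *lookup_mapping(struct obj *o)
{
	return o->m;
}

/* Stand-in for add_to_swap(): insertion changes the object's mapping. */
static bool add_to_swap(struct obj *o, struct mapping *swap_space)
{
	o->m = swap_space;
	return true;
}

static void writeback_one(struct mapping *m, struct obj *o)
{
	(void)m;
	(void)o;
}

static void reclaim_one(struct obj *o, struct mapping *swap_space)
{
	struct mapping *mapping;	/* deliberately not looked up yet */

	if (o->anon) {
		/*
		 * A lookup done before this call would be stale afterwards
		 * and would have to be repeated; deferring it avoids the
		 * extra call entirely.
		 */
		if (!add_to_swap(o, swap_space))
			return;
	}

	/* First point where the mapping is actually needed. */
	mapping = lookup_mapping(o);
	if (o->dirty)
		writeback_one(mapping, o);
}

int main(void)
{
	struct mapping swap_space = { .id = 1 };
	struct obj o = { .m = NULL, .anon = true, .dirty = false };

	reclaim_one(&o, &swap_space);
	return 0;
}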