--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -439,7 +439,6 @@ static inline bool node_reclaim_enabled(void)
}

void check_move_unevictable_folios(struct folio_batch *fbatch);
-void check_move_unevictable_pages(struct pagevec *pvec);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -8074,23 +8074,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
}
#endif

-void check_move_unevictable_pages(struct pagevec *pvec)
-{
-	struct folio_batch fbatch;
-	unsigned i;
-
-	folio_batch_init(&fbatch);
-	for (i = 0; i < pvec->nr; i++) {
-		struct page *page = pvec->pages[i];
-
-		if (PageTransTail(page))
-			continue;
-		folio_batch_add(&fbatch, page_folio(page));
-	}
-	check_move_unevictable_folios(&fbatch);
-}
-EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
-
/**
* check_move_unevictable_folios - Move evictable folios to appropriate zone
* lru list
All callers have now been converted to call check_move_unevictable_folios().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/swap.h |  1 -
 mm/vmscan.c          | 17 -----------------
 2 files changed, 18 deletions(-)
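For readers following the conversion, here is a minimal sketch of what a caller looks like on the folio_batch interface. It roughly mirrors the pattern shmem_unlock_mapping() follows after its earlier folio conversion; the function unlock_mapping_example() and its surrounding context are hypothetical, shown only to illustrate the API, and are not part of this patch.

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/sched.h>

/*
 * Hypothetical caller, shown only to illustrate the folio_batch
 * interface that replaces the removed pagevec wrapper.
 */
static void unlock_mapping_example(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		/*
		 * The batch holds folios, never tail pages, so the
		 * PageTransTail() filtering done by the old wrapper is
		 * not needed on this path.
		 */
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

The removed wrapper existed only to skip tail pages and repackage a pagevec into a folio_batch; once every caller builds a folio_batch directly, as sketched above, it has no remaining users.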