@@ -379,7 +379,6 @@ static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
-void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
@@ -54,7 +54,6 @@ struct cpu_fbatches {
*/
local_lock_t lock;
struct folio_batch lru_add;
- struct folio_batch lru_deactivate_file;
struct folio_batch lru_deactivate;
struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
@@ -524,68 +523,6 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
folio_add_lru(folio);
}
-/*
- * If the folio cannot be invalidated, it is moved to the
- * inactive list to speed up its reclaim. It is moved to the
- * head of the list, rather than the tail, to give the flusher
- * threads some time to write it out, as this is much more
- * effective than the single-page writeout from reclaim.
- *
- * If the folio isn't mapped and dirty/writeback, the folio
- * could be reclaimed asap using the reclaim flag.
- *
- * 1. active, mapped folio -> none
- * 2. active, dirty/writeback folio -> inactive, head, reclaim
- * 3. inactive, mapped folio -> none
- * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
- * 5. inactive, clean -> inactive, tail
- * 6. Others -> none
- *
- * In 4, it moves to the head of the inactive list so the folio is
- * written out by flusher threads as this is much more efficient
- * than the single-page writeout from reclaim.
- */
-static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
-{
- bool active = folio_test_active(folio) || lru_gen_enabled();
- long nr_pages = folio_nr_pages(folio);
-
- if (folio_test_unevictable(folio))
- return;
-
- /* Some processes are using the folio */
- if (folio_mapped(folio))
- return;
-
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- folio_clear_referenced(folio);
-
- if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
- /*
- * Setting the reclaim flag could race with
- * folio_end_writeback() and confuse readahead. But the
- * race window is _really_ small and it's not a critical
- * problem.
- */
- lruvec_add_folio(lruvec, folio);
- folio_set_reclaim(folio);
- } else {
- /*
- * The folio's writeback ended while it was in the batch.
- * We move that folio to the tail of the inactive list.
- */
- lruvec_add_folio_tail(lruvec, folio);
- __count_vm_events(PGROTATED, nr_pages);
- }
-
- if (active) {
- __count_vm_events(PGDEACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
- nr_pages);
- }
-}
-
static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
{
long nr_pages = folio_nr_pages(folio);
@@ -652,10 +589,6 @@ void lru_add_drain_cpu(int cpu)
local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
}

- fbatch = &fbatches->lru_deactivate_file;
- if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_deactivate_file);
-
fbatch = &fbatches->lru_deactivate;
if (folio_batch_count(fbatch))
folio_batch_move_lru(fbatch, lru_deactivate);
@@ -667,28 +600,6 @@ void lru_add_drain_cpu(int cpu)
folio_activate_drain(cpu);
}

-/**
- * deactivate_file_folio() - Deactivate a file folio.
- * @folio: Folio to deactivate.
- *
- * This function hints to the VM that @folio is a good reclaim candidate,
- * for example if its invalidation fails due to the folio being dirty
- * or under writeback.
- *
- * Context: Caller holds a reference on the folio.
- */
-void deactivate_file_folio(struct folio *folio)
-{
- /* Deactivating an unevictable folio will not accelerate reclaim */
- if (folio_test_unevictable(folio))
- return;
-
- if (lru_gen_enabled() && lru_gen_clear_refs(folio))
- return;
-
- folio_batch_add_and_move(folio, lru_deactivate_file, true);
-}
-
/*
* folio_deactivate - deactivate a folio
* @folio: folio to deactivate
@@ -772,7 +683,6 @@ static bool cpu_needs_drain(unsigned int cpu)
/* Check these in order of likelihood that they're not zero */
return folio_batch_count(&fbatches->lru_add) ||
folio_batch_count(&fbatches->lru_move_tail) ||
- folio_batch_count(&fbatches->lru_deactivate_file) ||
folio_batch_count(&fbatches->lru_deactivate) ||
folio_batch_count(&fbatches->lru_lazyfree) ||
folio_batch_count(&fbatches->lru_activate) ||
@@ -486,7 +486,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
* of interest and try to speed up its reclaim.
*/
if (!ret) {
- deactivate_file_folio(folio);
+ folio_set_dropbehind(folio);
/* Likely in the lru cache of a remote CPU */
if (nr_failed)
(*nr_failed)++;
The recently introduced PG_dropbehind flag allows folios to be freed
immediately after writeback. Unlike PG_reclaim, it does not require
vmscan involvement to get the folio freed. The new flag allows the
whole deactivate_file_folio() machinery to be replaced with a simple
folio_set_dropbehind() call.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 mm/internal.h |  1 -
 mm/swap.c     | 90 --------------------------------------------------
 mm/truncate.c |  2 +-
 3 files changed, 1 insertion(+), 92 deletions(-)
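Not part of the commit message, just an illustration for review: a minimal
userspace sketch of the behavioural difference described above. struct
toy_folio, end_writeback() and the printed messages are made up for the
example and are not kernel APIs; the point is only that a dropbehind-style
hint lets the folio be freed as soon as writeback completes, while a
reclaim-style hint still depends on a later vmscan pass.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of a folio carrying only the two hint bits of interest. */
struct toy_folio {
	bool writeback;
	bool reclaim;		/* old hint: rotate, wait for vmscan */
	bool dropbehind;	/* new hint: free at writeback completion */
};

/*
 * Writeback completion.  With the dropbehind hint the folio is freed
 * right here; with the reclaim hint it is merely rotated so that a
 * later vmscan pass can find and free it.  Returns true if freed.
 */
static bool end_writeback(struct toy_folio *folio)
{
	folio->writeback = false;

	if (folio->dropbehind) {
		printf("dropbehind: freed at writeback completion\n");
		free(folio);
		return true;
	}

	if (folio->reclaim)
		printf("reclaim: rotated to inactive tail, vmscan frees it later\n");

	return false;
}

int main(void)
{
	struct toy_folio *folio = calloc(1, sizeof(*folio));

	if (!folio)
		return 1;

	/* Invalidation failed while the folio was under writeback... */
	folio->writeback = true;
	/* ...so hint that it should be dropped once writeback finishes. */
	folio->dropbehind = true;

	if (!end_writeback(folio))
		free(folio);

	return 0;
}

With the old scheme the folio would only get the reclaim-style treatment and
linger until vmscan ran; with the hint flipped to dropbehind it is gone at
writeback completion, which is what makes the batching machinery removed
above unnecessary.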