Now that PG_reclaim is gone, its name can be reclaimed for better use :)

Rename PG_dropbehind to PG_reclaim and rename all helpers around it.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c |  2 +-
 include/linux/mm_inline.h                 |  2 +-
 include/linux/page-flags.h                |  8 +++---
 include/linux/pagemap.h                   |  2 +-
 include/trace/events/mmflags.h            |  2 +-
 mm/filemap.c                              | 34 +++++++++++------------
 mm/migrate.c                              |  4 +--
 mm/readahead.c                            |  4 +--
 mm/swap.c                                 |  2 +-
 mm/truncate.c                             |  2 +-
 mm/vmscan.c                               | 22 +++++++--------
 mm/zswap.c                                |  2 +-
 12 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -329,7 +329,7 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
if (!folio_mapped(folio) && folio_clear_dirty_for_io(folio)) {
int ret;
- folio_set_dropbehind(folio);
+ folio_set_reclaim(folio);
ret = mapping->a_ops->writepage(&folio->page, &wbc);
if (!ret)
goto put;
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -241,7 +241,7 @@ static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct foli
else if (reclaiming)
gen = MAX_NR_GENS;
else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
- folio_test_dropbehind(folio))
+ folio_test_reclaim(folio))
gen = MIN_NR_GENS;
else
gen = MAX_NR_GENS - folio_test_workingset(folio);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -110,7 +110,7 @@ enum pageflags {
PG_readahead,
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
- PG_dropbehind, /* drop pages on IO completion */
+ PG_reclaim, /* drop pages on IO completion */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
@@ -595,9 +595,9 @@ FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
-FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
- FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
- __FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+FOLIO_FLAG(reclaim, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(reclaim, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(reclaim, FOLIO_HEAD_PAGE)
#ifdef CONFIG_HIGHMEM
/*
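For reviewers less familiar with the flag-macro machinery touched here: the FOLIO_FLAG()/FOLIO_TEST_CLEAR_FLAG()/__FOLIO_SET_FLAG() lines above generate the whole accessor family used throughout this series. A simplified sketch of what they expand to (the real macros in include/linux/page-flags.h additionally encode the head-page policy and debug checks):

static inline bool folio_test_reclaim(const struct folio *folio)
{
	return test_bit(PG_reclaim, &folio->flags);
}

static inline void folio_set_reclaim(struct folio *folio)
{
	set_bit(PG_reclaim, &folio->flags);	/* atomic */
}

static inline void folio_clear_reclaim(struct folio *folio)
{
	clear_bit(PG_reclaim, &folio->flags);	/* atomic */
}

/* FOLIO_TEST_CLEAR_FLAG() adds the test-and-clear form: */
static inline bool folio_test_clear_reclaim(struct folio *folio)
{
	return test_and_clear_bit(PG_reclaim, &folio->flags);
}

/*
 * __FOLIO_SET_FLAG() adds the non-atomic form, only safe while the
 * folio is not yet visible to other CPUs (e.g. freshly allocated):
 */
static inline void __folio_set_reclaim(struct folio *folio)
{
	__set_bit(PG_reclaim, &folio->flags);
}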
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1371,7 +1371,7 @@ struct readahead_control {
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
- bool dropbehind;
+ bool reclaim;
bool _workingset;
unsigned long _pflags;
};
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -117,7 +117,7 @@
DEF_PAGEFLAG_NAME(readahead), \
DEF_PAGEFLAG_NAME(swapbacked), \
DEF_PAGEFLAG_NAME(unevictable), \
- DEF_PAGEFLAG_NAME(dropbehind) \
+ DEF_PAGEFLAG_NAME(reclaim) \
IF_HAVE_PG_MLOCK(mlocked) \
IF_HAVE_PG_HWPOISON(hwpoison) \
IF_HAVE_PG_IDLE(idle) \
diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1590,11 +1590,11 @@ int folio_wait_private_2_killable(struct folio *folio)
EXPORT_SYMBOL(folio_wait_private_2_killable);
/*
- * If folio was marked as dropbehind, then pages should be dropped when writeback
+ * If the folio was marked for reclaim, then pages should be dropped when writeback
* completes. Do that now. If we fail, it's likely because of a big folio -
- * just reset dropbehind for that case and latter completions should invalidate.
+ * just reset the reclaim flag for that case and later completions should invalidate.
*/
-static void folio_end_dropbehind_write(struct folio *folio)
+static void folio_end_reclaim_write(struct folio *folio)
{
/*
* Hitting !in_task() should not happen off RWF_DONTCACHE writeback,
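The hunk context cuts off before the function body. For review purposes, what it does is roughly the following (a paraphrase, not the verbatim mm/filemap.c code; the exact checks may differ):

	/* Drop the pages now if it is safe to do so. */
	if (in_task() && folio_trylock(folio)) {
		if (folio->mapping)
			folio_unmap_invalidate(folio->mapping, folio, 0);
		folio_unlock(folio);
	}

An interrupt-context completion cannot take the folio lock, hence the in_task() expectation mentioned in the comment above.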
@@ -1620,7 +1620,7 @@ static void folio_end_dropbehind_write(struct folio *folio)
*/
void folio_end_writeback(struct folio *folio)
{
- bool folio_dropbehind = false;
+ bool folio_reclaim = false;
VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
@@ -1632,13 +1632,13 @@ void folio_end_writeback(struct folio *folio)
*/
folio_get(folio);
if (!folio_test_dirty(folio))
- folio_dropbehind = folio_test_clear_dropbehind(folio);
+ folio_reclaim = folio_test_clear_reclaim(folio);
if (__folio_end_writeback(folio))
folio_wake_bit(folio, PG_writeback);
acct_reclaim_writeback(folio);
- if (folio_dropbehind)
- folio_end_dropbehind_write(folio);
+ if (folio_reclaim)
+ folio_end_reclaim_write(folio);
folio_put(folio);
}
EXPORT_SYMBOL(folio_end_writeback);
@@ -1962,7 +1962,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
if (fgp_flags & FGP_ACCESSED)
__folio_set_referenced(folio);
if (fgp_flags & FGP_DONTCACHE)
- __folio_set_dropbehind(folio);
+ __folio_set_reclaim(folio);
err = filemap_add_folio(mapping, folio, index, gfp);
if (!err)
@@ -1986,8 +1986,8 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
if (!folio)
return ERR_PTR(-ENOENT);
/* not an uncached lookup, clear uncached if set */
- if (folio_test_dropbehind(folio) && !(fgp_flags & FGP_DONTCACHE))
- folio_clear_dropbehind(folio);
+ if (folio_test_reclaim(folio) && !(fgp_flags & FGP_DONTCACHE))
+ folio_clear_reclaim(folio);
return folio;
}
EXPORT_SYMBOL(__filemap_get_folio);
@@ -2485,7 +2485,7 @@ static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch)
if (!folio)
return -ENOMEM;
if (iocb->ki_flags & IOCB_DONTCACHE)
- __folio_set_dropbehind(folio);
+ __folio_set_reclaim(folio);
/*
* Protect against truncate / hole punch. Grabbing invalidate_lock
@@ -2532,7 +2532,7 @@ static int filemap_readahead(struct kiocb *iocb, struct file *file,
if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN;
if (iocb->ki_flags & IOCB_DONTCACHE)
- ractl.dropbehind = 1;
+ ractl.reclaim = 1;
page_cache_async_ra(&ractl, folio, last_index - folio->index);
return 0;
}
@@ -2563,7 +2563,7 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
if (iocb->ki_flags & IOCB_NOWAIT)
flags = memalloc_noio_save();
if (iocb->ki_flags & IOCB_DONTCACHE)
- ractl.dropbehind = 1;
+ ractl.reclaim = 1;
page_cache_sync_ra(&ractl, last_index - index);
if (iocb->ki_flags & IOCB_NOWAIT)
memalloc_noio_restore(flags);
@@ -2611,15 +2611,15 @@ static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
return (pos1 >> shift == pos2 >> shift);
}
-static void filemap_end_dropbehind_read(struct address_space *mapping,
+static void filemap_end_reclaim_read(struct address_space *mapping,
struct folio *folio)
{
- if (!folio_test_dropbehind(folio))
+ if (!folio_test_reclaim(folio))
return;
if (folio_test_writeback(folio) || folio_test_dirty(folio))
return;
if (folio_trylock(folio)) {
- if (folio_test_clear_dropbehind(folio))
+ if (folio_test_clear_reclaim(folio))
folio_unmap_invalidate(mapping, folio, 0);
folio_unlock(folio);
}
@@ -2741,7 +2741,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
- filemap_end_dropbehind_read(mapping, folio);
+ filemap_end_reclaim_read(mapping, folio);
folio_put(folio);
}
folio_batch_init(&fbatch);
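All of the read-side hunks above are driven by IOCB_DONTCACHE, which userspace requests via preadv2(2). A minimal sketch of the triggering call (RWF_DONTCACHE and its 0x80 value are taken from the uapi headers of kernels that carry this feature; older libc headers may not define it yet):

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE	0x00000080	/* see include/uapi/linux/fs.h */
#endif

static ssize_t read_dontcache(int fd, void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	/*
	 * The read is served through the page cache, but the folios are
	 * marked with the (renamed) reclaim flag, so
	 * filemap_end_reclaim_read() above drops them once the data has
	 * been copied out.
	 */
	return preadv2(fd, &iov, 1, off, RWF_DONTCACHE);
}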
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -683,8 +683,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
folio_set_dirty(newfolio);
/* TODO: free the folio on migration? */
- if (folio_test_dropbehind(folio))
- folio_set_dropbehind(newfolio);
+ if (folio_test_reclaim(folio))
+ folio_set_reclaim(newfolio);
if (folio_test_young(folio))
folio_set_young(newfolio);
diff --git a/mm/readahead.c b/mm/readahead.c
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -184,8 +184,8 @@ static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
struct folio *folio;
folio = filemap_alloc_folio(gfp_mask, order);
- if (folio && ractl->dropbehind)
- __folio_set_dropbehind(folio);
+ if (folio && ractl->reclaim)
+ __folio_set_reclaim(folio);
return folio;
}
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -406,7 +406,7 @@ static bool lru_gen_clear_refs(struct folio *folio)
*/
void folio_mark_accessed(struct folio *folio)
{
- if (folio_test_dropbehind(folio))
+ if (folio_test_reclaim(folio))
return;
if (lru_gen_enabled()) {
lru_gen_inc_refs(folio);
diff --git a/mm/truncate.c b/mm/truncate.c
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -486,7 +486,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
* of interest and try to speed up its reclaim.
*/
if (!ret) {
- folio_set_dropbehind(folio);
+ folio_set_reclaim(folio);
/* Likely in the lru cache of a remote CPU */
if (nr_failed)
(*nr_failed)++;
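For context, mapping_try_invalidate() is what runs under POSIX_FADV_DONTNEED. A minimal userspace sketch that exercises the path above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>

/*
 * Hint that a file's cached pages are no longer needed.  Folios that
 * cannot be invalidated right away (still in use elsewhere) instead get
 * the reclaim flag set by mapping_try_invalidate() above, so the next
 * reclaim pass prefers them.
 */
static void drop_file_cache(int fd)
{
	int err = posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);

	if (err)
		fprintf(stderr, "posix_fadvise: %s\n", strerror(err));
}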
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -692,13 +692,13 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
if (shmem_mapping(mapping) && folio_test_large(folio))
wbc.list = folio_list;
- folio_set_dropbehind(folio);
+ folio_set_reclaim(folio);
res = mapping->a_ops->writepage(&folio->page, &wbc);
if (res < 0)
handle_write_error(mapping, folio, res);
if (res == AOP_WRITEPAGE_ACTIVATE) {
- folio_clear_dropbehind(folio);
+ folio_clear_reclaim(folio);
return PAGE_ACTIVATE;
}
@@ -1140,7 +1140,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
* for immediate reclaim are making it to the end of
* the LRU a second time.
*/
- if (writeback && folio_test_dropbehind(folio))
+ if (writeback && folio_test_reclaim(folio))
stat->nr_congested += nr_pages;
/*
@@ -1149,7 +1149,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
*
* 1) If reclaim is encountering an excessive number
* of folios under writeback and this folio has both
- * the writeback and dropbehind flags set, then it
+ * the writeback and reclaim flags set, then it
* indicates that folios are being queued for I/O but
* are being recycled through the LRU before the I/O
* can complete. Waiting on the folio itself risks an
@@ -1174,7 +1174,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
* would probably show more reasons.
*
* 3) Legacy memcg encounters a folio that already has the
- * dropbehind flag set. memcg does not have any dirty folio
+ * reclaim flag set. memcg does not have any dirty folio
* throttling so we could easily OOM just because too many
* folios are in writeback and there is nothing else to
* reclaim. Wait for the writeback to complete.
@@ -1193,17 +1193,17 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
/* Case 1 above */
if (current_is_kswapd() &&
- folio_test_dropbehind(folio) &&
+ folio_test_reclaim(folio) &&
test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
stat->nr_immediate += nr_pages;
goto activate_locked;
/* Case 2 above */
} else if (writeback_throttling_sane(sc) ||
- !folio_test_dropbehind(folio) ||
+ !folio_test_reclaim(folio) ||
!may_enter_fs(folio, sc->gfp_mask) ||
(mapping && mapping_writeback_indeterminate(mapping))) {
- folio_set_dropbehind(folio);
+ folio_set_reclaim(folio);
stat->nr_writeback += nr_pages;
goto activate_locked;
@@ -1235,7 +1235,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
* Before reclaiming the folio, try to relocate
* its contents to another node.
*/
- if (do_demote_pass && !folio_test_dropbehind(folio) &&
+ if (do_demote_pass && !folio_test_reclaim(folio) &&
(thp_migration_supported() || !folio_test_large(folio))) {
list_add(&folio->lru, &demote_folios);
folio_unlock(folio);
@@ -1358,7 +1358,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
*/
if (folio_is_file_lru(folio) &&
(!current_is_kswapd() ||
- !folio_test_dropbehind(folio) ||
+ !folio_test_reclaim(folio) ||
!test_bit(PGDAT_DIRTY, &pgdat->flags))) {
/*
* Immediately reclaim when written back.
@@ -1368,7 +1368,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
*/
node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
nr_pages);
- folio_set_dropbehind(folio);
+ folio_set_reclaim(folio);
goto activate_locked;
}
diff --git a/mm/zswap.c b/mm/zswap.c
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1097,7 +1097,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
folio_mark_uptodate(folio);
/* free the folio after writeback */
- folio_set_dropbehind(folio);
+ folio_set_reclaim(folio);
/* start writeback */
__swap_writepage(folio, &wbc);