@@ -319,8 +319,7 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
update_lru_size(lruvec, lru, folio_zonenum(folio),
folio_nr_pages(folio));
- if (lru != LRU_UNEVICTABLE)
- list_add(&folio->lru, &lruvec->lists[lru]);
+ list_add(&folio->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list(struct page *page,
@@ -339,21 +338,17 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
update_lru_size(lruvec, lru, folio_zonenum(folio),
folio_nr_pages(folio));
- /* This is not expected to be used on LRU_UNEVICTABLE */
list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
- enum lru_list lru = folio_lru_list(folio);
-
if (lru_gen_del_folio(lruvec, folio, false))
return;
- if (lru != LRU_UNEVICTABLE)
- list_del(&folio->lru);
- update_lru_size(lruvec, lru, folio_zonenum(folio),
+ list_del(&folio->lru);
+ update_lru_size(lruvec, folio_lru_list(folio), folio_zonenum(folio),
-folio_nr_pages(folio));
}
@@ -2407,8 +2407,7 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
} else {
/* head is still on lru (and we have it frozen) */
VM_WARN_ON(!PageLRU(head));
- if (!PageUnevictable(tail))
- list_add_tail(&tail->lru, &head->lru);
+ list_add_tail(&tail->lru, &head->lru);
SetPageLRU(tail);
}
}
@@ -81,14 +81,6 @@ void lruvec_init(struct lruvec *lruvec)
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
- /*
- * The "Unevictable LRU" is imaginary: though its size is maintained,
- * it is never scanned, and unevictable pages are not threaded on it
- * (so that their lru fields can be reused to hold mlock_count).
- * Poison its list head, so that any operations on it would crash.
- */
- list_del(&lruvec->lists[LRU_UNEVICTABLE]);
-
lru_gen_init_lruvec(lruvec);
}
Now that mlock_count no longer overlays page->lru, revive the unevictable LRU. No need to special-case it when adding/removing a folio to/from the LRUs. This also enables future work that will use the LRUs to find all user folios charged to a memcg; having the unevictable LRU makes sure we are not missing a significant chunk of those. Signed-off-by: Yosry Ahmed <yosryahmed@google.com> --- include/linux/mm_inline.h | 11 +++-------- mm/huge_memory.c | 3 +-- mm/mmzone.c | 8 -------- 3 files changed, 4 insertions(+), 18 deletions(-)