@@ -324,7 +324,7 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
-void workingset_activation(struct page *page);
+void workingset_activation(struct folio *folio);
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
@@ -447,7 +447,7 @@ void mark_page_accessed(struct page *page)
else
__lru_cache_activate_page(page);
ClearPageReferenced(page);
- workingset_activation(page);
+ workingset_activation(page_folio(page));
}
if (page_is_idle(page))
clear_page_idle(page);
@@ -390,9 +390,9 @@ void workingset_refault(struct page *page, void *shadow)
/**
* workingset_activation - note a page activation
- * @page: page that is being activated
+ * @folio: Folio that is being activated.
*/
-void workingset_activation(struct page *page)
+void workingset_activation(struct folio *folio)
{
struct mem_cgroup *memcg;
struct lruvec *lruvec;
@@ -405,11 +405,11 @@ void workingset_activation(struct page *page)
* XXX: See workingset_refault() - this should return
* root_mem_cgroup even for !CONFIG_MEMCG.
*/
- memcg = page_memcg_rcu(page);
+ memcg = page_memcg_rcu(&folio->page);
if (!mem_cgroup_disabled() && !memcg)
goto out;
- lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- workingset_age_nonresident(lruvec, thp_nr_pages(page));
+ lruvec = mem_cgroup_folio_lruvec(folio);
+ workingset_age_nonresident(lruvec, folio_nr_pages(folio));
out:
rcu_read_unlock();
}
This function already assumed it was being passed a head page.  No real
change here, except that thp_nr_pages() compiles away on kernels with
THP compiled out while folio_nr_pages() is always present.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/swap.h |  2 +-
 mm/swap.c            |  2 +-
 mm/workingset.c      | 10 +++++-----
 3 files changed, 7 insertions(+), 7 deletions(-)
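
As an aside on the thp_nr_pages()/folio_nr_pages() remark above, a simplified
sketch of the two helpers (not the exact kernel definitions, which carry
additional debug assertions) shows why only the page-based helper can collapse
to a compile-time constant:

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* With THP built in, a head page may cover HPAGE_PMD_NR base pages. */
static inline int thp_nr_pages(struct page *page)
{
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}
#else
/* With THP compiled out, every page is order-0, so this folds to 1. */
static inline int thp_nr_pages(struct page *page)
{
	return 1;
}
#endif

/*
 * folio_nr_pages() does not depend on CONFIG_TRANSPARENT_HUGEPAGE: a folio
 * is never a tail page, so the compound size can be read unconditionally.
 */
static inline long folio_nr_pages(struct folio *folio)
{
	return compound_nr(&folio->page);
}

For the head pages this function already required, the result fed to
workingset_age_nonresident() is unchanged; the folio version simply makes
that head-page assumption part of the type rather than an implicit rule.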