@@ -456,6 +456,11 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
 	return __page_memcg(page);
 }
 
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+	return page_memcg(&folio->page);
+}
+
 /*
  * page_memcg_rcu - locklessly get the memory cgroup associated with a page
  * @page: a pointer to the page struct
@@ -1052,6 +1057,15 @@ static inline void count_memcg_page_event(struct page *page,
 	count_memcg_events(memcg, idx, 1);
 }
 
+static inline void count_memcg_folio_event(struct folio *folio,
+					    enum vm_event_item idx)
+{
+	struct mem_cgroup *memcg = folio_memcg(folio);
+
+	if (memcg)
+		count_memcg_events(memcg, idx, folio_nr_pages(folio));
+}
+
 static inline void count_memcg_event_mm(struct mm_struct *mm,
 					enum vm_event_item idx)
 {
@@ -1473,6 +1487,22 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 }
 #endif /* CONFIG_MEMCG */
 
+static inline void lock_folio_memcg(struct folio *folio)
+{
+	lock_page_memcg(&folio->page);
+}
+
+static inline void unlock_folio_memcg(struct folio *folio)
+{
+	unlock_page_memcg(&folio->page);
+}
+
+static inline struct lruvec *mem_cgroup_folio_lruvec(struct folio *folio,
+						struct pglist_data *pgdat)
+{
+	return mem_cgroup_page_lruvec(&folio->page, pgdat);
+}
+
 static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
 {
 	__mod_lruvec_kmem_state(p, idx, 1);
Add new wrapper functions folio_memcg(), lock_folio_memcg(),
unlock_folio_memcg(), mem_cgroup_folio_lruvec() and
count_memcg_folio_event().  These let callers that already have a folio
avoid converting back to a struct page at each call site;
count_memcg_folio_event() counts one event per base page in the folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/memcontrol.h | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
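
Not part of the patch, just a reviewer aid: a rough sketch of how a
caller that already has a folio might use the new wrappers.  The
function example_account_folio() and the choice of the PGACTIVATE event
are made up purely for illustration.

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Illustrative sketch only -- not part of this patch. */
static void example_account_folio(struct folio *folio, pg_data_t *pgdat)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/* Counts one event per base page in the folio. */
	count_memcg_folio_event(folio, PGACTIVATE);

	/* Pin the folio's memcg binding while we use it. */
	lock_folio_memcg(folio);
	memcg = folio_memcg(folio);
	lruvec = mem_cgroup_folio_lruvec(folio, pgdat);
	pr_debug("memcg=%p lruvec=%p\n", memcg, lruvec);
	unlock_folio_memcg(folio);
}

Whether lock_folio_memcg() is really needed around folio_memcg() depends
on the caller's context; it is shown here only to exercise the new
lock/unlock wrappers.  The point of the five helpers is that the
folio-to-page conversion stays in memcontrol.h rather than being
repeated at every call site.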