[v3,10/18] mm/memcg: Convert mem_cgroup_uncharge() to take a folio

Message ID 20210630040034.1155892-11-willy@infradead.org (mailing list archive)
State New
Series Folio conversion of memcg

Commit Message

Matthew Wilcox June 30, 2021, 4 a.m. UTC
Convert all the callers to call page_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/memcontrol.h |  4 ++--
 mm/filemap.c               |  2 +-
 mm/khugepaged.c            |  4 ++--
 mm/memcontrol.c            | 14 +++++++-------
 mm/memory-failure.c        |  2 +-
 mm/memremap.c              |  2 +-
 mm/page_alloc.c            |  2 +-
 mm/swap.c                  |  2 +-
 8 files changed, 16 insertions(+), 16 deletions(-)
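
At each call site the change is mechanical: a caller that still holds a
struct page wraps it in page_folio() before calling the now folio-based
mem_cgroup_uncharge(). A minimal illustrative sketch (the helper name is
hypothetical, not taken from the diff):

        static void uncharge_page_example(struct page *page)
        {
                /* was: mem_cgroup_uncharge(page); */
                mem_cgroup_uncharge(page_folio(page));
        }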

Comments

kernel test robot June 30, 2021, 8:46 a.m. UTC | #1
Hi "Matthew,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on next-20210630]
[cannot apply to hnaz-linux-mm/master tip/perf/core cgroup/for-next v5.13]
[If your patch is applied to the wrong git tree, kindly drop us a note.
When submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Matthew-Wilcox-Oracle/Folio-conversion-of-memcg/20210630-121408
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 007b350a58754a93ca9fe50c498cc27780171153
config: sparc64-randconfig-r002-20210628 (attached as .config)
compiler: sparc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/b527e805e7d7066a8fea14ff4a49f53454c355a1
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Matthew-Wilcox-Oracle/Folio-conversion-of-memcg/20210630-121408
        git checkout b527e805e7d7066a8fea14ff4a49f53454c355a1
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=sparc64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

     403 |  VM_BUG_ON_FOLIO(folio_slab(folio), folio);
         |  ^~~~~~~~~~~~~~~
         |  VM_BUG_ON_MM
   include/linux/memcontrol.h:403:18: error: implicit declaration of function 'folio_slab' [-Werror=implicit-function-declaration]
     403 |  VM_BUG_ON_FOLIO(folio_slab(folio), folio);
         |                  ^~~~~~~~~~
   include/linux/memcontrol.h: At top level:
   include/linux/memcontrol.h:420:55: warning: 'struct folio' declared inside parameter list will not be visible outside of this definition or declaration
     420 | static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
         |                                                       ^~~~~
   include/linux/memcontrol.h: In function '__folio_objcg':
   include/linux/memcontrol.h:422:34: error: dereferencing pointer to incomplete type 'struct folio'
     422 |  unsigned long memcg_data = folio->memcg_data;
         |                                  ^~
   include/linux/memcontrol.h: At top level:
   include/linux/memcontrol.h:451:53: warning: 'struct folio' declared inside parameter list will not be visible outside of this definition or declaration
     451 | static inline struct mem_cgroup *folio_memcg(struct folio *folio)
         |                                                     ^~~~~
   include/linux/memcontrol.h: In function 'folio_memcg':
   include/linux/memcontrol.h:453:23: error: passing argument 1 of 'folio_memcg_kmem' from incompatible pointer type [-Werror=incompatible-pointer-types]
     453 |  if (folio_memcg_kmem(folio))
         |                       ^~~~~
         |                       |
         |                       struct folio *
   include/linux/memcontrol.h:375:51: note: expected 'struct folio *' but argument is of type 'struct folio *'
     375 | static inline bool folio_memcg_kmem(struct folio *folio);
         |                                     ~~~~~~~~~~~~~~^~~~~
   include/linux/memcontrol.h:454:41: error: passing argument 1 of '__folio_objcg' from incompatible pointer type [-Werror=incompatible-pointer-types]
     454 |   return obj_cgroup_memcg(__folio_objcg(folio));
         |                                         ^~~~~
         |                                         |
         |                                         struct folio *
   include/linux/memcontrol.h:420:62: note: expected 'struct folio *' but argument is of type 'struct folio *'
     420 | static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
         |                                                ~~~~~~~~~~~~~~^~~~~
   include/linux/memcontrol.h:456:24: error: passing argument 1 of '__folio_memcg' from incompatible pointer type [-Werror=incompatible-pointer-types]
     456 |   return __folio_memcg(folio);
         |                        ^~~~~
         |                        |
         |                        struct folio *
   include/linux/memcontrol.h:399:62: note: expected 'struct folio *' but argument is of type 'struct folio *'
     399 | static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
         |                                                ~~~~~~~~~~~~~~^~~~~
   include/linux/memcontrol.h: In function 'page_memcg':
   include/linux/memcontrol.h:461:21: error: implicit declaration of function 'page_folio' [-Werror=implicit-function-declaration]
     461 |  return folio_memcg(page_folio(page));
         |                     ^~~~~~~~~~
   include/linux/memcontrol.h:461:21: warning: passing argument 1 of 'folio_memcg' makes pointer from integer without a cast [-Wint-conversion]
     461 |  return folio_memcg(page_folio(page));
         |                     ^~~~~~~~~~~~~~~~
         |                     |
         |                     int
   include/linux/memcontrol.h:451:60: note: expected 'struct folio *' but argument is of type 'int'
     451 | static inline struct mem_cgroup *folio_memcg(struct folio *folio)
         |                                              ~~~~~~~~~~~~~~^~~~~
   include/linux/memcontrol.h: At top level:
   include/linux/memcontrol.h:589:44: warning: 'struct folio' declared inside parameter list will not be visible outside of this definition or declaration
     589 | static inline bool folio_memcg_kmem(struct folio *folio)
         |                                            ^~~~~
   include/linux/memcontrol.h:589:20: error: conflicting types for 'folio_memcg_kmem'
     589 | static inline bool folio_memcg_kmem(struct folio *folio)
         |                    ^~~~~~~~~~~~~~~~
   include/linux/memcontrol.h:375:20: note: previous declaration of 'folio_memcg_kmem' was here
     375 | static inline bool folio_memcg_kmem(struct folio *folio);
         |                    ^~~~~~~~~~~~~~~~
   include/linux/memcontrol.h: In function 'PageMemcgKmem':
   include/linux/memcontrol.h:607:26: warning: passing argument 1 of 'folio_memcg_kmem' makes pointer from integer without a cast [-Wint-conversion]
     607 |  return folio_memcg_kmem(page_folio(page));
         |                          ^~~~~~~~~~~~~~~~
         |                          |
         |                          int
   include/linux/memcontrol.h:589:51: note: expected 'struct folio *' but argument is of type 'int'
     589 | static inline bool folio_memcg_kmem(struct folio *folio)
         |                                     ~~~~~~~~~~~~~~^~~~~
   include/linux/memcontrol.h: At top level:
   include/linux/memcontrol.h:708:30: warning: 'struct folio' declared inside parameter list will not be visible outside of this definition or declaration
     708 | int mem_cgroup_charge(struct folio *, struct mm_struct *, gfp_t);
         |                              ^~~~~
   include/linux/memcontrol.h:713:33: warning: 'struct folio' declared inside parameter list will not be visible outside of this definition or declaration
     713 | void mem_cgroup_uncharge(struct folio *);
         |                                 ^~~~~
   In file included from arch/sparc/include/asm/bug.h:6,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/mm.h:9,
                    from mm/khugepaged.c:4:
   mm/khugepaged.c: In function 'collapse_huge_page':
   mm/khugepaged.c:1091:33: warning: passing argument 1 of 'mem_cgroup_charge' makes pointer from integer without a cast [-Wint-conversion]
    1091 |  if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
         |                                 ^~~~~~~~~~~~~~~~~~~~
         |                                 |
         |                                 int
   include/linux/compiler.h:78:42: note: in definition of macro 'unlikely'
      78 | # define unlikely(x) __builtin_expect(!!(x), 0)
         |                                          ^
   In file included from include/linux/rmap.h:12,
                    from mm/khugepaged.c:9:
   include/linux/memcontrol.h:708:23: note: expected 'struct folio *' but argument is of type 'int'
     708 | int mem_cgroup_charge(struct folio *, struct mm_struct *, gfp_t);
         |                       ^~~~~~~~~~~~~~
>> mm/khugepaged.c:1215:23: warning: passing argument 1 of 'mem_cgroup_uncharge' makes pointer from integer without a cast [-Wint-conversion]
    1215 |   mem_cgroup_uncharge(page_folio(*hpage));
         |                       ^~~~~~~~~~~~~~~~~~
         |                       |
         |                       int
   In file included from include/linux/rmap.h:12,
                    from mm/khugepaged.c:9:
   include/linux/memcontrol.h:713:26: note: expected 'struct folio *' but argument is of type 'int'
     713 | void mem_cgroup_uncharge(struct folio *);
         |                          ^~~~~~~~~~~~~~
   mm/khugepaged.c: At top level:
   include/linux/memcontrol.h:375:20: warning: 'folio_memcg_kmem' used but never defined
     375 | static inline bool folio_memcg_kmem(struct folio *folio);
         |                    ^~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors
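
The seemingly self-contradictory notes above ("expected 'struct folio *' but
argument is of type 'struct folio *'") follow from C scoping rules: a struct
tag that first appears inside a prototype's parameter list is scoped to that
prototype only, so it is a distinct type from any later file-scope struct of
the same name. A minimal standalone sketch (hypothetical, not part of the
series or this config) that triggers the same family of diagnostics:

        #include <stdbool.h>

        /*
         * 'struct folio' has not been declared yet, so this prototype creates
         * a prototype-scoped 'struct folio' that nothing else can refer to,
         * and gcc warns that it "will not be visible outside of this
         * definition or declaration".
         */
        static inline bool folio_memcg_kmem(struct folio *folio);

        /* This file-scope tag is a different incomplete type. */
        struct folio;

        static bool folio_is_kmem(struct folio *folio)
        {
                /*
                 * -Wincompatible-pointer-types: expected 'struct folio *' but
                 * argument is of type 'struct folio *' -- the two tags live
                 * in different scopes, so the pointer types do not match.
                 */
                return folio_memcg_kmem(folio);
        }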


vim +/mem_cgroup_uncharge +1215 mm/khugepaged.c

  1056	
  1057	static void collapse_huge_page(struct mm_struct *mm,
  1058					   unsigned long address,
  1059					   struct page **hpage,
  1060					   int node, int referenced, int unmapped)
  1061	{
  1062		LIST_HEAD(compound_pagelist);
  1063		pmd_t *pmd, _pmd;
  1064		pte_t *pte;
  1065		pgtable_t pgtable;
  1066		struct page *new_page;
  1067		spinlock_t *pmd_ptl, *pte_ptl;
  1068		int isolated = 0, result = 0;
  1069		struct vm_area_struct *vma;
  1070		struct mmu_notifier_range range;
  1071		gfp_t gfp;
  1072	
  1073		VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  1074	
  1075		/* Only allocate from the target node */
  1076		gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
  1077	
  1078		/*
  1079		 * Before allocating the hugepage, release the mmap_lock read lock.
  1080		 * The allocation can take potentially a long time if it involves
  1081		 * sync compaction, and we do not need to hold the mmap_lock during
  1082		 * that. We will recheck the vma after taking it again in write mode.
  1083		 */
  1084		mmap_read_unlock(mm);
  1085		new_page = khugepaged_alloc_page(hpage, gfp, node);
  1086		if (!new_page) {
  1087			result = SCAN_ALLOC_HUGE_PAGE_FAIL;
  1088			goto out_nolock;
  1089		}
  1090	
  1091		if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
  1092			result = SCAN_CGROUP_CHARGE_FAIL;
  1093			goto out_nolock;
  1094		}
  1095		count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
  1096	
  1097		mmap_read_lock(mm);
  1098		result = hugepage_vma_revalidate(mm, address, &vma);
  1099		if (result) {
  1100			mmap_read_unlock(mm);
  1101			goto out_nolock;
  1102		}
  1103	
  1104		pmd = mm_find_pmd(mm, address);
  1105		if (!pmd) {
  1106			result = SCAN_PMD_NULL;
  1107			mmap_read_unlock(mm);
  1108			goto out_nolock;
  1109		}
  1110	
  1111		/*
  1112		 * __collapse_huge_page_swapin always returns with mmap_lock locked.
  1113		 * If it fails, we release mmap_lock and jump out_nolock.
  1114		 * Continuing to collapse causes inconsistency.
  1115		 */
  1116		if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
  1117							     pmd, referenced)) {
  1118			mmap_read_unlock(mm);
  1119			goto out_nolock;
  1120		}
  1121	
  1122		mmap_read_unlock(mm);
  1123		/*
  1124		 * Prevent all access to pagetables with the exception of
  1125		 * gup_fast later handled by the ptep_clear_flush and the VM
  1126		 * handled by the anon_vma lock + PG_lock.
  1127		 */
  1128		mmap_write_lock(mm);
  1129		result = hugepage_vma_revalidate(mm, address, &vma);
  1130		if (result)
  1131			goto out_up_write;
  1132		/* check if the pmd is still valid */
  1133		if (mm_find_pmd(mm, address) != pmd)
  1134			goto out_up_write;
  1135	
  1136		anon_vma_lock_write(vma->anon_vma);
  1137	
  1138		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
  1139					address, address + HPAGE_PMD_SIZE);
  1140		mmu_notifier_invalidate_range_start(&range);
  1141	
  1142		pte = pte_offset_map(pmd, address);
  1143		pte_ptl = pte_lockptr(mm, pmd);
  1144	
  1145		pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
  1146		/*
  1147		 * After this gup_fast can't run anymore. This also removes
  1148		 * any huge TLB entry from the CPU so we won't allow
  1149		 * huge and small TLB entries for the same virtual address
  1150		 * to avoid the risk of CPU bugs in that area.
  1151		 */
  1152		_pmd = pmdp_collapse_flush(vma, address, pmd);
  1153		spin_unlock(pmd_ptl);
  1154		mmu_notifier_invalidate_range_end(&range);
  1155	
  1156		spin_lock(pte_ptl);
  1157		isolated = __collapse_huge_page_isolate(vma, address, pte,
  1158				&compound_pagelist);
  1159		spin_unlock(pte_ptl);
  1160	
  1161		if (unlikely(!isolated)) {
  1162			pte_unmap(pte);
  1163			spin_lock(pmd_ptl);
  1164			BUG_ON(!pmd_none(*pmd));
  1165			/*
  1166			 * We can only use set_pmd_at when establishing
  1167			 * hugepmds and never for establishing regular pmds that
  1168			 * points to regular pagetables. Use pmd_populate for that
  1169			 */
  1170			pmd_populate(mm, pmd, pmd_pgtable(_pmd));
  1171			spin_unlock(pmd_ptl);
  1172			anon_vma_unlock_write(vma->anon_vma);
  1173			result = SCAN_FAIL;
  1174			goto out_up_write;
  1175		}
  1176	
  1177		/*
  1178		 * All pages are isolated and locked so anon_vma rmap
  1179		 * can't run anymore.
  1180		 */
  1181		anon_vma_unlock_write(vma->anon_vma);
  1182	
  1183		__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
  1184				&compound_pagelist);
  1185		pte_unmap(pte);
  1186		/*
  1187		 * spin_lock() below is not the equivalent of smp_wmb(), but
  1188		 * the smp_wmb() inside __SetPageUptodate() can be reused to
  1189		 * avoid the copy_huge_page writes to become visible after
  1190		 * the set_pmd_at() write.
  1191		 */
  1192		__SetPageUptodate(new_page);
  1193		pgtable = pmd_pgtable(_pmd);
  1194	
  1195		_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
  1196		_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
  1197	
  1198		spin_lock(pmd_ptl);
  1199		BUG_ON(!pmd_none(*pmd));
  1200		page_add_new_anon_rmap(new_page, vma, address, true);
  1201		lru_cache_add_inactive_or_unevictable(new_page, vma);
  1202		pgtable_trans_huge_deposit(mm, pmd, pgtable);
  1203		set_pmd_at(mm, address, pmd, _pmd);
  1204		update_mmu_cache_pmd(vma, address, pmd);
  1205		spin_unlock(pmd_ptl);
  1206	
  1207		*hpage = NULL;
  1208	
  1209		khugepaged_pages_collapsed++;
  1210		result = SCAN_SUCCEED;
  1211	out_up_write:
  1212		mmap_write_unlock(mm);
  1213	out_nolock:
  1214		if (!IS_ERR_OR_NULL(*hpage))
> 1215			mem_cgroup_uncharge(page_folio(*hpage));
  1216		trace_mm_collapse_huge_page(mm, isolated, result);
  1217		return;
  1218	}
  1219	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Christoph Hellwig July 1, 2021, 7:17 a.m. UTC | #2
On Wed, Jun 30, 2021 at 05:00:26AM +0100, Matthew Wilcox (Oracle) wrote:
> Convert all the callers to call page_folio().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  include/linux/memcontrol.h |  4 ++--
>  mm/filemap.c               |  2 +-
>  mm/khugepaged.c            |  4 ++--
>  mm/memcontrol.c            | 14 +++++++-------
>  mm/memory-failure.c        |  2 +-
>  mm/memremap.c              |  2 +-
>  mm/page_alloc.c            |  2 +-
>  mm/swap.c                  |  2 +-
>  8 files changed, 16 insertions(+), 16 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 90d48b0e3191..d6386a2b9d7a 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -710,7 +710,7 @@ int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
>  				  gfp_t gfp, swp_entry_t entry);
>  void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
>  
> -void mem_cgroup_uncharge(struct page *page);
> +void mem_cgroup_uncharge(struct folio *);

why do you drop the parameter name?

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>
Matthew Wilcox July 7, 2021, 12:09 p.m. UTC | #3
On Thu, Jul 01, 2021 at 08:17:48AM +0100, Christoph Hellwig wrote:
> On Wed, Jun 30, 2021 at 05:00:26AM +0100, Matthew Wilcox (Oracle) wrote:
> > -void mem_cgroup_uncharge(struct page *page);
> > +void mem_cgroup_uncharge(struct folio *);
> 
> why do you drop the parameter name?

I usually do where it's 'struct foo *foo' or 'foo_t foo'.  If the best
you can do is say "it's a page", well, yes, I knew that from the type.
But since you've complained, I've added the pointless 'folio' to it.

> Otherwise looks good:
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>
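
With the parameter name added as described in the reply above (the v3 posting
below still carries the unnamed form), the declaration reads:

        void mem_cgroup_uncharge(struct folio *folio);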

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 90d48b0e3191..d6386a2b9d7a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -710,7 +710,7 @@  int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void mem_cgroup_uncharge(struct page *page);
+void mem_cgroup_uncharge(struct folio *);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
@@ -1202,7 +1202,7 @@  static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
 {
 }
 
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
 }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 9600bca84162..0008ada132c4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -923,7 +923,7 @@  noinline int __add_to_page_cache_locked(struct page *page,
 	if (xas_error(&xas)) {
 		error = xas_error(&xas);
 		if (charged)
-			mem_cgroup_uncharge(page);
+			mem_cgroup_uncharge(page_folio(page));
 		goto error;
 	}
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0daa21fbdd71..988a230c7a41 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1212,7 +1212,7 @@  static void collapse_huge_page(struct mm_struct *mm,
 	mmap_write_unlock(mm);
 out_nolock:
 	if (!IS_ERR_OR_NULL(*hpage))
-		mem_cgroup_uncharge(*hpage);
+		mem_cgroup_uncharge(page_folio(*hpage));
 	trace_mm_collapse_huge_page(mm, isolated, result);
 	return;
 }
@@ -1963,7 +1963,7 @@  static void collapse_file(struct mm_struct *mm,
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
 	if (!IS_ERR_OR_NULL(*hpage))
-		mem_cgroup_uncharge(*hpage);
+		mem_cgroup_uncharge(page_folio(*hpage));
 	/* TODO: tracepoints */
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 21b791935957..90a53f554371 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6892,24 +6892,24 @@  static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
 }
 
 /**
- * mem_cgroup_uncharge - uncharge a page
- * @page: page to uncharge
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
  *
- * Uncharge a page previously charged with mem_cgroup_charge().
+ * Uncharge a folio previously charged with folio_charge_cgroup().
  */
-void mem_cgroup_uncharge(struct page *page)
+void mem_cgroup_uncharge(struct folio *folio)
 {
 	struct uncharge_gather ug;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	/* Don't touch page->lru of any random page, pre-check: */
-	if (!page_memcg(page))
+	/* Don't touch folio->lru of any random page, pre-check: */
+	if (!folio_memcg(folio))
 		return;
 
 	uncharge_gather_clear(&ug);
-	uncharge_folio(page_folio(page), &ug);
+	uncharge_folio(folio, &ug);
 	uncharge_batch(&ug);
 }
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e5a1531f7f4e..7ada5959b5ad 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -750,7 +750,7 @@  static int delete_from_lru_cache(struct page *p)
 		 * Poisoned page might never drop its ref count to 0 so we have
 		 * to uncharge it manually from its memcg.
 		 */
-		mem_cgroup_uncharge(p);
+		mem_cgroup_uncharge(page_folio(p));
 
 		/*
 		 * drop the page count elevated by isolate_lru_page()
diff --git a/mm/memremap.c b/mm/memremap.c
index 15a074ffb8d7..6eac40f9f62a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -508,7 +508,7 @@  void free_devmap_managed_page(struct page *page)
 
 	__ClearPageWaiters(page);
 
-	mem_cgroup_uncharge(page);
+	mem_cgroup_uncharge(page_folio(page));
 
 	/*
 	 * When a device_private page is freed, the page->mapping field
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0817d88383d5..5a5fcd4f21a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -737,7 +737,7 @@  static inline void free_the_page(struct page *page, unsigned int order)
 
 void free_compound_page(struct page *page)
 {
-	mem_cgroup_uncharge(page);
+	mem_cgroup_uncharge(page_folio(page));
 	free_the_page(page, compound_order(page));
 }
 
diff --git a/mm/swap.c b/mm/swap.c
index 6954cfebab4f..8ba62a930370 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -94,7 +94,7 @@  static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
-	mem_cgroup_uncharge(page);
+	mem_cgroup_uncharge(page_folio(page));
 	free_unref_page(page, 0);
 }