[1/3] madvise: Convert madvise_cold_or_pageout_pte_range() to use folios

Message ID 20221207002158.418789-2-vishal.moola@gmail.com (mailing list archive)
State: New
Series: Convert deactivate_page() to deactivate_folio()

Commit Message

Vishal Moola Dec. 7, 2022, 12:21 a.m. UTC
This change removes a number of calls to compound_head() and saves 1319
bytes of kernel text.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 mm/madvise.c | 88 +++++++++++++++++++++++++++-------------------------
 1 file changed, 45 insertions(+), 43 deletions(-)
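
To illustrate where those savings come from: page-flag helpers such as
ClearPageReferenced() resolve a possibly-tail page to its head page via
compound_head() on every call, while the folio helpers start from the
head by construction. A minimal sketch of the pattern (illustrative,
not taken from the patch):

        /* Before: each helper re-derives the head page internally. */
        ClearPageReferenced(page);
        test_and_clear_page_young(page);

        /* After: resolve the head once, then operate on the folio. */
        struct folio *folio = page_folio(page);
        folio_clear_referenced(folio);
        folio_test_clear_young(folio);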

Comments

kernel test robot Dec. 7, 2022, 9:47 a.m. UTC | #1
Hi Vishal,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v6.1-rc8]
[cannot apply to akpm-mm/mm-everything next-20221207]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Vishal-Moola-Oracle/Convert-deactivate_page-to-deactivate_folio/20221207-082339
patch link:    https://lore.kernel.org/r/20221207002158.418789-2-vishal.moola%40gmail.com
patch subject: [PATCH 1/3] madvise: Convert madvise_cold_or_pageout_pte_range() to use folios
config: hexagon-randconfig-r041-20221206
compiler: clang version 16.0.0 (https://github.com/llvm/llvm-project 6e4cea55f0d1104408b26ac574566a0e4de48036)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/670bf0d6d5222d0c92e72b7869d25fa64f582082
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Vishal-Moola-Oracle/Convert-deactivate_page-to-deactivate_folio/20221207-082339
        git checkout 670bf0d6d5222d0c92e72b7869d25fa64f582082
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=hexagon SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from mm/madvise.c:10:
   In file included from include/linux/pagemap.h:11:
   In file included from include/linux/highmem.h:12:
   In file included from include/linux/hardirq.h:11:
   In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
   In file included from include/asm-generic/hardirq.h:17:
   In file included from include/linux/irq.h:20:
   In file included from include/linux/io.h:13:
   In file included from arch/hexagon/include/asm/io.h:334:
   include/asm-generic/io.h:547:31: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __raw_readb(PCI_IOBASE + addr);
                             ~~~~~~~~~~ ^
   include/asm-generic/io.h:560:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/little_endian.h:37:51: note: expanded from macro '__le16_to_cpu'
   #define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
                                                     ^
   In file included from mm/madvise.c:10:
   In file included from include/linux/pagemap.h:11:
   In file included from include/linux/highmem.h:12:
   In file included from include/linux/hardirq.h:11:
   In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
   In file included from include/asm-generic/hardirq.h:17:
   In file included from include/linux/irq.h:20:
   In file included from include/linux/io.h:13:
   In file included from arch/hexagon/include/asm/io.h:334:
   include/asm-generic/io.h:573:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/little_endian.h:35:51: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
                                                     ^
   In file included from mm/madvise.c:10:
   In file included from include/linux/pagemap.h:11:
   In file included from include/linux/highmem.h:12:
   In file included from include/linux/hardirq.h:11:
   In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
   In file included from include/asm-generic/hardirq.h:17:
   In file included from include/linux/irq.h:20:
   In file included from include/linux/io.h:13:
   In file included from arch/hexagon/include/asm/io.h:334:
   include/asm-generic/io.h:584:33: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writeb(value, PCI_IOBASE + addr);
                               ~~~~~~~~~~ ^
   include/asm-generic/io.h:594:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
   include/asm-generic/io.h:604:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
>> mm/madvise.c:464:43: error: incompatible pointer types passing 'struct folio *' to parameter of type 'struct page *' [-Werror,-Wincompatible-pointer-types]
                   VM_BUG_ON_PAGE(folio_test_large(folio), folio);
                                                           ^~~~~
   include/linux/mmdebug.h:21:14: note: expanded from macro 'VM_BUG_ON_PAGE'
                           dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\
                                     ^~~~
   include/linux/mmdebug.h:12:29: note: passing argument to parameter 'page' here
   void dump_page(struct page *page, const char *reason);
                               ^
   6 warnings and 1 error generated.
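
The error is in the new VM_BUG_ON_PAGE() call itself: the macro's
dump_page() path expects a struct page *, but the patch now hands it a
struct folio *. A likely fix, assuming the folio-aware assertion from
include/linux/mmdebug.h (which dumps &folio->page internally) is the
intended replacement:

        /* mm/madvise.c:464 */
        VM_BUG_ON_FOLIO(folio_test_large(folio), folio);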


vim +464 mm/madvise.c

   323	
   324	static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
   325					unsigned long addr, unsigned long end,
   326					struct mm_walk *walk)
   327	{
   328		struct madvise_walk_private *private = walk->private;
   329		struct mmu_gather *tlb = private->tlb;
   330		bool pageout = private->pageout;
   331		struct mm_struct *mm = tlb->mm;
   332		struct vm_area_struct *vma = walk->vma;
   333		pte_t *orig_pte, *pte, ptent;
   334		spinlock_t *ptl;
   335		struct folio *folio = NULL;
   336		struct page *page = NULL;
   337		LIST_HEAD(folio_list);
   338	
   339		if (fatal_signal_pending(current))
   340			return -EINTR;
   341	
   342	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
   343		if (pmd_trans_huge(*pmd)) {
   344			pmd_t orig_pmd;
   345			unsigned long next = pmd_addr_end(addr, end);
   346	
   347			tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
   348			ptl = pmd_trans_huge_lock(pmd, vma);
   349			if (!ptl)
   350				return 0;
   351	
   352			orig_pmd = *pmd;
   353			if (is_huge_zero_pmd(orig_pmd))
   354				goto huge_unlock;
   355	
   356			if (unlikely(!pmd_present(orig_pmd))) {
   357				VM_BUG_ON(thp_migration_supported() &&
   358						!is_pmd_migration_entry(orig_pmd));
   359				goto huge_unlock;
   360			}
   361	
   362			folio = pfn_folio(pmd_pfn(orig_pmd));
   363	
   364			/* Do not interfere with other mappings of this folio */
   365			if (folio_mapcount(folio) != 1)
   366				goto huge_unlock;
   367	
   368			if (next - addr != HPAGE_PMD_SIZE) {
   369				int err;
   370	
   371				folio_get(folio);
   372				spin_unlock(ptl);
   373				folio_lock(folio);
   374				err = split_folio(folio);
   375				folio_unlock(folio);
   376				folio_put(folio);
   377				if (!err)
   378					goto regular_folio;
   379				return 0;
   380			}
   381	
   382			if (pmd_young(orig_pmd)) {
   383				pmdp_invalidate(vma, addr, pmd);
   384				orig_pmd = pmd_mkold(orig_pmd);
   385	
   386				set_pmd_at(mm, addr, pmd, orig_pmd);
   387				tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
   388			}
   389	
   390			folio_clear_referenced(folio);
   391			folio_test_clear_young(folio);
   392			if (pageout) {
   393				if (!folio_isolate_lru(folio)) {
   394					if (folio_test_unevictable(folio))
   395						folio_putback_lru(folio);
   396					else
   397						list_add(&folio->lru, &folio_list);
   398				}
   399			} else
   400				deactivate_page(&folio->page);
   401	huge_unlock:
   402			spin_unlock(ptl);
   403			if (pageout)
   404				reclaim_pages(&folio_list);
   405			return 0;
   406		}
   407	
   408	regular_folio:
   409		if (pmd_trans_unstable(pmd))
   410			return 0;
   411	#endif
   412		tlb_change_page_size(tlb, PAGE_SIZE);
   413		orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
   414		flush_tlb_batched_pending(mm);
   415		arch_enter_lazy_mmu_mode();
   416		for (; addr < end; pte++, addr += PAGE_SIZE) {
   417			ptent = *pte;
   418	
   419			if (pte_none(ptent))
   420				continue;
   421	
   422			if (!pte_present(ptent))
   423				continue;
   424	
   425			page = vm_normal_page(vma, addr, ptent);
   426			if (!page || is_zone_device_page(page))
   427				continue;
   428			folio = page_folio(page);
   429	
   430			/*
   431			 * Creating a THP page is expensive so split it only if we
   432			 * are sure it's worth. Split it if we are only owner.
   433			 */
   434			if (folio_test_large(folio)) {
   435				if (folio_mapcount(folio) != 1)
   436					break;
   437				folio_get(folio);
   438				if (!folio_trylock(folio)) {
   439					folio_put(folio);
   440					break;
   441				}
   442				pte_unmap_unlock(orig_pte, ptl);
   443				if (split_folio(folio)) {
   444					folio_unlock(folio);
   445					folio_put(folio);
   446					orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
   447					break;
   448				}
   449				folio_unlock(folio);
   450				folio_put(folio);
   451				orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
   452				pte--;
   453				addr -= PAGE_SIZE;
   454				continue;
   455			}
   456	
   457			/*
   458			 * Do not interfere with other mappings of this folio and
   459			 * non-LRU folio.
   460			 */
   461			if (!folio_test_lru(folio))
   462				continue;
   463	
 > 464			VM_BUG_ON_PAGE(folio_test_large(folio), folio);
   465	
   466			if (pte_young(ptent)) {
   467				ptent = ptep_get_and_clear_full(mm, addr, pte,
   468								tlb->fullmm);
   469				ptent = pte_mkold(ptent);
   470				set_pte_at(mm, addr, pte, ptent);
   471				tlb_remove_tlb_entry(tlb, pte, addr);
   472			}
   473	
   474			/*
   475			 * We are deactivating a folio for accelerating reclaiming.
   476			 * VM couldn't reclaim the folio unless we clear PG_young.
   477			 * As a side effect, it makes confuse idle-page tracking
   478			 * because they will miss recent referenced history.
   479			 */
   480			folio_clear_referenced(folio);
   481			folio_test_clear_young(folio);
   482			if (pageout) {
   483				if (!folio_isolate_lru(folio)) {
   484					if (folio_test_unevictable(folio))
   485						folio_putback_lru(folio);
   486					else
   487						list_add(&folio->lru, &folio_list);
   488				}
   489			} else
   490				deactivate_page(&folio->page);
   491		}
   492	
   493		arch_leave_lazy_mmu_mode();
   494		pte_unmap_unlock(orig_pte, ptl);
   495		if (pageout)
   496			reclaim_pages(&folio_list);
   497		cond_resched();
   498	
   499		return 0;
   500	}
   501
kernel test robot Dec. 7, 2022, 11:46 p.m. UTC | #2
Hi Vishal,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v6.1-rc8]
[cannot apply to akpm-mm/mm-everything next-20221207]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Vishal-Moola-Oracle/Convert-deactivate_page-to-deactivate_folio/20221207-082339
patch link:    https://lore.kernel.org/r/20221207002158.418789-2-vishal.moola%40gmail.com
patch subject: [PATCH 1/3] madvise: Convert madvise_cold_or_pageout_pte_range() to use folios
config: x86_64-randconfig-a011
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/670bf0d6d5222d0c92e72b7869d25fa64f582082
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Vishal-Moola-Oracle/Convert-deactivate_page-to-deactivate_folio/20221207-082339
        git checkout 670bf0d6d5222d0c92e72b7869d25fa64f582082
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from include/linux/mm.h:6,
                    from include/linux/mman.h:5,
                    from mm/madvise.c:9:
   mm/madvise.c: In function 'madvise_cold_or_pageout_pte_range':
>> mm/madvise.c:464:57: error: passing argument 1 of 'dump_page' from incompatible pointer type [-Werror=incompatible-pointer-types]
     464 |                 VM_BUG_ON_PAGE(folio_test_large(folio), folio);
         |                                                         ^~~~~
         |                                                         |
         |                                                         struct folio *
   include/linux/mmdebug.h:21:35: note: in definition of macro 'VM_BUG_ON_PAGE'
      21 |                         dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\
         |                                   ^~~~
   include/linux/mmdebug.h:12:29: note: expected 'struct page *' but argument is of type 'struct folio *'
      12 | void dump_page(struct page *page, const char *reason);
         |                ~~~~~~~~~~~~~^~~~
   cc1: some warnings being treated as errors
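
gcc trips over the same statement as clang in #1, just reported from
dump_page()'s side. If VM_BUG_ON_FOLIO() were not an option, a more
conservative interim fix would be to pass the embedded head page
explicitly (illustrative sketch):

        /* mm/madvise.c:464 — equivalent check via the head page */
        VM_BUG_ON_PAGE(folio_test_large(folio), &folio->page);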


vim +/dump_page +464 mm/madvise.c

    [ function context identical to the listing in comment #1 above ]

Patch

diff --git a/mm/madvise.c b/mm/madvise.c
index 2baa93ca2310..59bfc6c9c548 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -332,8 +332,9 @@  static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 	struct vm_area_struct *vma = walk->vma;
 	pte_t *orig_pte, *pte, ptent;
 	spinlock_t *ptl;
+	struct folio *folio = NULL;
 	struct page *page = NULL;
-	LIST_HEAD(page_list);
+	LIST_HEAD(folio_list);
 
 	if (fatal_signal_pending(current))
 		return -EINTR;
@@ -358,23 +359,23 @@  static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 			goto huge_unlock;
 		}
 
-		page = pmd_page(orig_pmd);
+		folio = pfn_folio(pmd_pfn(orig_pmd));
 
-		/* Do not interfere with other mappings of this page */
-		if (page_mapcount(page) != 1)
+		/* Do not interfere with other mappings of this folio */
+		if (folio_mapcount(folio) != 1)
 			goto huge_unlock;
 
 		if (next - addr != HPAGE_PMD_SIZE) {
 			int err;
 
-			get_page(page);
+			folio_get(folio);
 			spin_unlock(ptl);
-			lock_page(page);
-			err = split_huge_page(page);
-			unlock_page(page);
-			put_page(page);
+			folio_lock(folio);
+			err = split_folio(folio);
+			folio_unlock(folio);
+			folio_put(folio);
 			if (!err)
-				goto regular_page;
+				goto regular_folio;
 			return 0;
 		}
 
@@ -386,25 +387,25 @@  static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 		}
 
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
 		if (pageout) {
-			if (!isolate_lru_page(page)) {
-				if (PageUnevictable(page))
-					putback_lru_page(page);
+			if (!folio_isolate_lru(folio)) {
+				if (folio_test_unevictable(folio))
+					folio_putback_lru(folio);
 				else
-					list_add(&page->lru, &page_list);
+					list_add(&folio->lru, &folio_list);
 			}
 		} else
-			deactivate_page(page);
+			deactivate_page(&folio->page);
 huge_unlock:
 		spin_unlock(ptl);
 		if (pageout)
-			reclaim_pages(&page_list);
+			reclaim_pages(&folio_list);
 		return 0;
 	}
 
-regular_page:
+regular_folio:
 	if (pmd_trans_unstable(pmd))
 		return 0;
 #endif
@@ -424,28 +425,29 @@  static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		page = vm_normal_page(vma, addr, ptent);
 		if (!page || is_zone_device_page(page))
 			continue;
+		folio = page_folio(page);
 
 		/*
 		 * Creating a THP page is expensive so split it only if we
 		 * are sure it's worth. Split it if we are only owner.
 		 */
-		if (PageTransCompound(page)) {
-			if (page_mapcount(page) != 1)
+		if (folio_test_large(folio)) {
+			if (folio_mapcount(folio) != 1)
 				break;
-			get_page(page);
-			if (!trylock_page(page)) {
-				put_page(page);
+			folio_get(folio);
+			if (!folio_trylock(folio)) {
+				folio_put(folio);
 				break;
 			}
 			pte_unmap_unlock(orig_pte, ptl);
-			if (split_huge_page(page)) {
-				unlock_page(page);
-				put_page(page);
+			if (split_folio(folio)) {
+				folio_unlock(folio);
+				folio_put(folio);
 				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 				break;
 			}
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 			pte--;
 			addr -= PAGE_SIZE;
@@ -453,13 +455,13 @@  static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		}
 
 		/*
-		 * Do not interfere with other mappings of this page and
-		 * non-LRU page.
+		 * Do not interfere with other mappings of this folio and
+		 * non-LRU folio.
 		 */
-		if (!PageLRU(page) || page_mapcount(page) != 1)
+		if (!folio_test_lru(folio))
 			continue;
 
-		VM_BUG_ON_PAGE(PageTransCompound(page), page);
+		VM_BUG_ON_PAGE(folio_test_large(folio), folio);
 
 		if (pte_young(ptent)) {
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
@@ -470,28 +472,28 @@  static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		}
 
 		/*
-		 * We are deactivating a page for accelerating reclaiming.
-		 * VM couldn't reclaim the page unless we clear PG_young.
+		 * We are deactivating a folio for accelerating reclaiming.
+		 * VM couldn't reclaim the folio unless we clear PG_young.
 		 * As a side effect, it makes confuse idle-page tracking
 		 * because they will miss recent referenced history.
 		 */
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
 		if (pageout) {
-			if (!isolate_lru_page(page)) {
-				if (PageUnevictable(page))
-					putback_lru_page(page);
+			if (!folio_isolate_lru(folio)) {
+				if (folio_test_unevictable(folio))
+					folio_putback_lru(folio);
 				else
-					list_add(&page->lru, &page_list);
+					list_add(&folio->lru, &folio_list);
 			}
 		} else
-			deactivate_page(page);
+			deactivate_page(&folio->page);
 	}
 
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(orig_pte, ptl);
 	if (pageout)
-		reclaim_pages(&page_list);
+		reclaim_pages(&folio_list);
 	cond_resched();
 
 	return 0;
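
The deactivate_page(&folio->page) calls left behind are transitional:
per the series title, patch 2/3 converts deactivate_page() itself to
deactivate_folio(), after which these call sites can presumably take
the folio directly. A hypothetical sketch of that follow-up (the
actual patch may differ):

        /* Assumed signature for the converted helper: */
        void deactivate_folio(struct folio *folio);

        /* The call sites above would then simplify from */
        deactivate_page(&folio->page);
        /* to */
        deactivate_folio(folio);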