
[mm-unstable,v1,4/4] mm/mglru: remove CONFIG_TRANSPARENT_HUGEPAGE

Message ID 20231220040037.883811-5-kinseyho@google.com (mailing list archive)
State New
Series mm/mglru: Kconfig cleanup

Commit Message

Kinsey Ho Dec. 20, 2023, 4 a.m. UTC
Improve code readability by removing CONFIG_TRANSPARENT_HUGEPAGE,
since the compiler should be able to automatically optimize out the
code that promotes THPs during page table walks.

No functional changes.

Signed-off-by: Kinsey Ho <kinseyho@google.com>
Co-developed-by: Aneesh Kumar K V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Aneesh Kumar K V <aneesh.kumar@linux.ibm.com>
---
 mm/vmscan.c | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)
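
The rationale above relies on constant folding: with CONFIG_TRANSPARENT_HUGEPAGE=n, pmd_trans_huge() evaluates to a compile-time 0, so the THP-only branch in the walk is discarded even without an explicit #ifdef. A minimal userspace sketch of that pattern (the stub and names below are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

#define CONFIG_THP 0	/* illustrative stand-in for CONFIG_TRANSPARENT_HUGEPAGE */

static inline int pmd_trans_huge_stub(unsigned long pmd)
{
#if CONFIG_THP
	return pmd & 1;
#else
	(void)pmd;
	return 0;	/* constant 0: callers' THP branches become dead code */
#endif
}

static void walk_one(unsigned long pmd)
{
	if (pmd_trans_huge_stub(pmd)) {
		/* THP-only work: compiled out when the stub folds to 0,
		 * but it still has to parse, so every helper it calls
		 * needs at least a declaration */
		printf("huge entry at %lx\n", pmd);
		return;
	}
	printf("base page entry at %lx\n", pmd);
}

int main(void)
{
	walk_one(0x1000);
	return 0;
}

That last caveat is exactly what the build reports later in this thread trip over.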

Comments

kernel test robot Dec. 20, 2023, 4:47 p.m. UTC | #1
Hi Kinsey,

kernel test robot noticed the following build errors:

[auto build test ERROR on akpm-mm/mm-everything]

url:    https://github.com/intel-lab-lkp/linux/commits/Kinsey-Ho/mm-mglru-add-CONFIG_ARCH_HAS_HW_PTE_YOUNG/20231220-120318
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20231220040037.883811-5-kinseyho%40google.com
patch subject: [PATCH mm-unstable v1 4/4] mm/mglru: remove CONFIG_TRANSPARENT_HUGEPAGE
config: arm-randconfig-002-20231220 (https://download.01.org/0day-ci/archive/20231221/202312210042.xQEiqlEh-lkp@intel.com/config)
compiler: arm-linux-gnueabi-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231221/202312210042.xQEiqlEh-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202312210042.xQEiqlEh-lkp@intel.com/

All errors (new ones prefixed by >>):

   mm/vmscan.c: In function 'walk_pmd_range_locked':
>> mm/vmscan.c:3455:21: error: implicit declaration of function 'pmd_dirty'; did you mean 'pte_dirty'? [-Werror=implicit-function-declaration]
    3455 |                 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
         |                     ^~~~~~~~~
         |                     pte_dirty
   cc1: some warnings being treated as errors
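
The underlying issue: dead-code elimination happens after parsing, so a branch the compiler will discard must still compile, and every function it calls needs at least a declaration. On configs where pmd_dirty() is only provided with THP enabled (or not at all), the now-unconditional call fails even though pmd_trans_huge() folds to 0. A reduced sketch of the same class of error, which fails to build by design (names are illustrative, not the kernel's):

#define CONFIG_THP 0	/* illustrative stand-in for CONFIG_TRANSPARENT_HUGEPAGE */

static inline int pmd_trans_huge_stub(unsigned long pmd) { (void)pmd; return 0; }

#if CONFIG_THP
static inline int pmd_dirty_stub(unsigned long pmd) { return pmd & 2; }
#endif

int walk(unsigned long pmd)
{
	if (pmd_trans_huge_stub(pmd))
		return pmd_dirty_stub(pmd);	/* implicit declaration: the branch is
						 * dead code, but it still must parse */
	return 0;
}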


vim +3455 mm/vmscan.c

bd74fdaea146029 Yu Zhao        2022-09-18  3394  
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3395  static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3396  				  struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
bd74fdaea146029 Yu Zhao        2022-09-18  3397  {
bd74fdaea146029 Yu Zhao        2022-09-18  3398  	int i;
bd74fdaea146029 Yu Zhao        2022-09-18  3399  	pmd_t *pmd;
bd74fdaea146029 Yu Zhao        2022-09-18  3400  	spinlock_t *ptl;
bd74fdaea146029 Yu Zhao        2022-09-18  3401  	struct lru_gen_mm_walk *walk = args->private;
bd74fdaea146029 Yu Zhao        2022-09-18  3402  	struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
bd74fdaea146029 Yu Zhao        2022-09-18  3403  	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
bd74fdaea146029 Yu Zhao        2022-09-18  3404  	int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
bd74fdaea146029 Yu Zhao        2022-09-18  3405  
bd74fdaea146029 Yu Zhao        2022-09-18  3406  	VM_WARN_ON_ONCE(pud_leaf(*pud));
bd74fdaea146029 Yu Zhao        2022-09-18  3407  
bd74fdaea146029 Yu Zhao        2022-09-18  3408  	/* try to batch at most 1+MIN_LRU_BATCH+1 entries */
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3409  	if (*first == -1) {
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3410  		*first = addr;
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3411  		bitmap_zero(bitmap, MIN_LRU_BATCH);
bd74fdaea146029 Yu Zhao        2022-09-18  3412  		return;
bd74fdaea146029 Yu Zhao        2022-09-18  3413  	}
bd74fdaea146029 Yu Zhao        2022-09-18  3414  
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3415  	i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first);
bd74fdaea146029 Yu Zhao        2022-09-18  3416  	if (i && i <= MIN_LRU_BATCH) {
bd74fdaea146029 Yu Zhao        2022-09-18  3417  		__set_bit(i - 1, bitmap);
bd74fdaea146029 Yu Zhao        2022-09-18  3418  		return;
bd74fdaea146029 Yu Zhao        2022-09-18  3419  	}
bd74fdaea146029 Yu Zhao        2022-09-18  3420  
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3421  	pmd = pmd_offset(pud, *first);
bd74fdaea146029 Yu Zhao        2022-09-18  3422  
bd74fdaea146029 Yu Zhao        2022-09-18  3423  	ptl = pmd_lockptr(args->mm, pmd);
bd74fdaea146029 Yu Zhao        2022-09-18  3424  	if (!spin_trylock(ptl))
bd74fdaea146029 Yu Zhao        2022-09-18  3425  		goto done;
bd74fdaea146029 Yu Zhao        2022-09-18  3426  
bd74fdaea146029 Yu Zhao        2022-09-18  3427  	arch_enter_lazy_mmu_mode();
bd74fdaea146029 Yu Zhao        2022-09-18  3428  
bd74fdaea146029 Yu Zhao        2022-09-18  3429  	do {
bd74fdaea146029 Yu Zhao        2022-09-18  3430  		unsigned long pfn;
bd74fdaea146029 Yu Zhao        2022-09-18  3431  		struct folio *folio;
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3432  
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3433  		/* don't round down the first address */
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3434  		addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
bd74fdaea146029 Yu Zhao        2022-09-18  3435  
bd74fdaea146029 Yu Zhao        2022-09-18  3436  		pfn = get_pmd_pfn(pmd[i], vma, addr);
bd74fdaea146029 Yu Zhao        2022-09-18  3437  		if (pfn == -1)
bd74fdaea146029 Yu Zhao        2022-09-18  3438  			goto next;
bd74fdaea146029 Yu Zhao        2022-09-18  3439  
bd74fdaea146029 Yu Zhao        2022-09-18  3440  		if (!pmd_trans_huge(pmd[i])) {
bd02df412cbb9a6 T.J. Alumbaugh 2023-05-22  3441  			if (should_clear_pmd_young())
bd74fdaea146029 Yu Zhao        2022-09-18  3442  				pmdp_test_and_clear_young(vma, addr, pmd + i);
bd74fdaea146029 Yu Zhao        2022-09-18  3443  			goto next;
bd74fdaea146029 Yu Zhao        2022-09-18  3444  		}
bd74fdaea146029 Yu Zhao        2022-09-18  3445  
bd74fdaea146029 Yu Zhao        2022-09-18  3446  		folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
bd74fdaea146029 Yu Zhao        2022-09-18  3447  		if (!folio)
bd74fdaea146029 Yu Zhao        2022-09-18  3448  			goto next;
bd74fdaea146029 Yu Zhao        2022-09-18  3449  
bd74fdaea146029 Yu Zhao        2022-09-18  3450  		if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
bd74fdaea146029 Yu Zhao        2022-09-18  3451  			goto next;
bd74fdaea146029 Yu Zhao        2022-09-18  3452  
bd74fdaea146029 Yu Zhao        2022-09-18  3453  		walk->mm_stats[MM_LEAF_YOUNG]++;
bd74fdaea146029 Yu Zhao        2022-09-18  3454  
bd74fdaea146029 Yu Zhao        2022-09-18 @3455  		if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
bd74fdaea146029 Yu Zhao        2022-09-18  3456  		    !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
bd74fdaea146029 Yu Zhao        2022-09-18  3457  		      !folio_test_swapcache(folio)))
bd74fdaea146029 Yu Zhao        2022-09-18  3458  			folio_mark_dirty(folio);
bd74fdaea146029 Yu Zhao        2022-09-18  3459  
bd74fdaea146029 Yu Zhao        2022-09-18  3460  		old_gen = folio_update_gen(folio, new_gen);
bd74fdaea146029 Yu Zhao        2022-09-18  3461  		if (old_gen >= 0 && old_gen != new_gen)
bd74fdaea146029 Yu Zhao        2022-09-18  3462  			update_batch_size(walk, folio, old_gen, new_gen);
bd74fdaea146029 Yu Zhao        2022-09-18  3463  next:
bd74fdaea146029 Yu Zhao        2022-09-18  3464  		i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
bd74fdaea146029 Yu Zhao        2022-09-18  3465  	} while (i <= MIN_LRU_BATCH);
bd74fdaea146029 Yu Zhao        2022-09-18  3466  
bd74fdaea146029 Yu Zhao        2022-09-18  3467  	arch_leave_lazy_mmu_mode();
bd74fdaea146029 Yu Zhao        2022-09-18  3468  	spin_unlock(ptl);
bd74fdaea146029 Yu Zhao        2022-09-18  3469  done:
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18  3470  	*first = -1;
bd74fdaea146029 Yu Zhao        2022-09-18  3471  }
bd74fdaea146029 Yu Zhao        2022-09-18  3472
kernel test robot Dec. 20, 2023, 10:38 p.m. UTC | #2
Hi Kinsey,

kernel test robot noticed the following build errors:

[auto build test ERROR on akpm-mm/mm-everything]

url:    https://github.com/intel-lab-lkp/linux/commits/Kinsey-Ho/mm-mglru-add-CONFIG_ARCH_HAS_HW_PTE_YOUNG/20231220-120318
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20231220040037.883811-5-kinseyho%40google.com
patch subject: [PATCH mm-unstable v1 4/4] mm/mglru: remove CONFIG_TRANSPARENT_HUGEPAGE
config: hexagon-allmodconfig (https://download.01.org/0day-ci/archive/20231221/202312210606.1Etqz3M4-lkp@intel.com/config)
compiler: clang version 18.0.0git (https://github.com/llvm/llvm-project 7022a24771c8404f847abb226735a3ae21794426)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231221/202312210606.1Etqz3M4-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202312210606.1Etqz3M4-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from mm/vmscan.c:19:
   In file included from include/linux/kernel_stat.h:9:
   In file included from include/linux/interrupt.h:11:
   In file included from include/linux/hardirq.h:11:
   In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
   In file included from include/asm-generic/hardirq.h:17:
   In file included from include/linux/irq.h:20:
   In file included from include/linux/io.h:13:
   In file included from arch/hexagon/include/asm/io.h:337:
   include/asm-generic/io.h:547:31: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
     547 |         val = __raw_readb(PCI_IOBASE + addr);
         |                           ~~~~~~~~~~ ^
   include/asm-generic/io.h:560:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
     560 |         val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
         |                                                         ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/little_endian.h:37:51: note: expanded from macro '__le16_to_cpu'
      37 | #define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
         |                                                   ^
   In file included from mm/vmscan.c:19:
   In file included from include/linux/kernel_stat.h:9:
   In file included from include/linux/interrupt.h:11:
   In file included from include/linux/hardirq.h:11:
   In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
   In file included from include/asm-generic/hardirq.h:17:
   In file included from include/linux/irq.h:20:
   In file included from include/linux/io.h:13:
   In file included from arch/hexagon/include/asm/io.h:337:
   include/asm-generic/io.h:573:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
     573 |         val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
         |                                                         ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/little_endian.h:35:51: note: expanded from macro '__le32_to_cpu'
      35 | #define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
         |                                                   ^
   In file included from mm/vmscan.c:19:
   In file included from include/linux/kernel_stat.h:9:
   In file included from include/linux/interrupt.h:11:
   In file included from include/linux/hardirq.h:11:
   In file included from ./arch/hexagon/include/generated/asm/hardirq.h:1:
   In file included from include/asm-generic/hardirq.h:17:
   In file included from include/linux/irq.h:20:
   In file included from include/linux/io.h:13:
   In file included from arch/hexagon/include/asm/io.h:337:
   include/asm-generic/io.h:584:33: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
     584 |         __raw_writeb(value, PCI_IOBASE + addr);
         |                             ~~~~~~~~~~ ^
   include/asm-generic/io.h:594:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
     594 |         __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
         |                                                       ~~~~~~~~~~ ^
   include/asm-generic/io.h:604:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
     604 |         __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
         |                                                       ~~~~~~~~~~ ^
>> mm/vmscan.c:3455:7: error: call to undeclared function 'pmd_dirty'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
    3455 |                 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
         |                     ^
   mm/vmscan.c:3455:7: note: did you mean 'pte_dirty'?
   arch/hexagon/include/asm/pgtable.h:282:19: note: 'pte_dirty' declared here
     282 | static inline int pte_dirty(pte_t pte)
         |                   ^
   6 warnings and 1 error generated.


vim +/pmd_dirty +3455 mm/vmscan.c

Yu Zhao Dec. 21, 2023, 5:26 a.m. UTC | #3
On Thu, Dec 21, 2023 at 12:47:51AM +0800, kernel test robot wrote:
> Hi Kinsey,
> 
> kernel test robot noticed the following build errors:
> 
> [auto build test ERROR on akpm-mm/mm-everything]
> 
> url:    https://github.com/intel-lab-lkp/linux/commits/Kinsey-Ho/mm-mglru-add-CONFIG_ARCH_HAS_HW_PTE_YOUNG/20231220-120318
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
> patch link:    https://lore.kernel.org/r/20231220040037.883811-5-kinseyho%40google.com
> patch subject: [PATCH mm-unstable v1 4/4] mm/mglru: remove CONFIG_TRANSPARENT_HUGEPAGE
> config: arm-randconfig-002-20231220 (https://download.01.org/0day-ci/archive/20231221/202312210042.xQEiqlEh-lkp@intel.com/config)
> compiler: arm-linux-gnueabi-gcc (GCC) 13.2.0
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231221/202312210042.xQEiqlEh-lkp@intel.com/reproduce)
> 
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@intel.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202312210042.xQEiqlEh-lkp@intel.com/

Thanks for the report.

Kinsey, please make sure you include the above tags (as well as the
Closes tag from the second report in this thread) in the v2.

> All errors (new ones prefixed by >>):
> 
>    mm/vmscan.c: In function 'walk_pmd_range_locked':
> >> mm/vmscan.c:3455:21: error: implicit declaration of function 'pmd_dirty'; did you mean 'pte_dirty'? [-Werror=implicit-function-declaration]
>     3455 |                 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
>          |                     ^~~~~~~~~
>          |                     pte_dirty
>    cc1: some warnings being treated as errors

Apparently we need the following, similar to
commit 6617da8fb565 ("mm: add dummy pmd_young() for architectures not having it"):

diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 29d9b12298bc..8b5df1bbf9e9 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -523,6 +523,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
 	return pmd;
 }
 
+#define pmd_dirty pmd_dirty
 static inline int pmd_dirty(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 430b208c0130..e27a4c83c548 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -655,6 +655,7 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 	return pmd;
 }
 
+#define pmd_dirty pmd_dirty
 static inline int pmd_dirty(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index ab00235b018f..7b4287f36054 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -673,6 +673,7 @@ static inline int pmd_write(pmd_t pmd)
 	return pte_write(pmd_pte(pmd));
 }
 
+#define pmd_dirty pmd_dirty
 static inline int pmd_dirty(pmd_t pmd)
 {
 	return pte_dirty(pmd_pte(pmd));
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 601e87fa8a9a..1299b56e43f6 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -770,6 +770,7 @@ static inline int pud_write(pud_t pud)
 	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
 }
 
+#define pmd_dirty pmd_dirty
 static inline int pmd_dirty(pmd_t pmd)
 {
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 5e41033bf4ca..a8c871b7d786 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -706,6 +706,7 @@ static inline unsigned long pmd_write(pmd_t pmd)
 #define pud_write(pud)	pte_write(__pte(pud_val(pud)))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_dirty pmd_dirty
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 57bab91bbf50..ee83a238ac13 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -141,6 +141,7 @@ static inline int pte_young(pte_t pte)
 	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
+#define pmd_dirty pmd_dirty
 static inline bool pmd_dirty(pmd_t pmd)
 {
 	return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index af7639c3b0a3..b646c84cc592 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -184,6 +184,13 @@ static inline int pmd_young(pmd_t pmd)
 }
 #endif
 
+#ifndef pmd_dirty
+static inline int pmd_dirty(pmd_t pmd)
+{
+	return 0;
+}
+#endif
+
 /*
  * A facility to provide lazy MMU batching.  This allows PTE updates and
  * page invalidations to be delayed until a call to leave lazy MMU mode
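
The diff above follows the usual override idiom for optional per-arch helpers: an architecture that implements pmd_dirty() also #defines pmd_dirty to its own name, and the generic header compiles a dummy fallback only when that macro is absent (include/linux/pgtable.h pulls in the arch header first, which is what makes the #ifndef check work). A self-contained sketch of the idiom, with helper() as a hypothetical stand-in for pmd_dirty():

#include <stdio.h>

/* "arch" side: provide an implementation and announce it via the macro */
#define helper helper
static inline int helper(int x)
{
	return x * 2;
}

/* "generic" side: the fallback is compiled only when no override exists */
#ifndef helper
static inline int helper(int x)
{
	return 0;	/* conservative default, like the dummy pmd_dirty() above */
}
#endif

int main(void)
{
	printf("%d\n", helper(21));	/* prints 42: the arch override wins */
	return 0;
}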

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 351a0b5043c0..ceba905e5630 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3273,7 +3273,6 @@  static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned
 	return pfn;
 }
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
 {
 	unsigned long pfn = pmd_pfn(pmd);
@@ -3291,7 +3290,6 @@  static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned
 
 	return pfn;
 }
-#endif
 
 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
 				   struct pglist_data *pgdat, bool can_swap)
@@ -3394,7 +3392,6 @@  static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
 	return suitable_to_scan(total, young);
 }
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
 				  struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
 {
@@ -3472,12 +3469,6 @@  static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
 done:
 	*first = -1;
 }
-#else
-static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
-				  struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
-{
-}
-#endif
 
 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
 			   struct mm_walk *args)
@@ -3513,7 +3504,6 @@  static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
 			continue;
 		}
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 		if (pmd_trans_huge(val)) {
 			unsigned long pfn = pmd_pfn(val);
 			struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
@@ -3532,7 +3522,7 @@  static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
 			walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
 			continue;
 		}
-#endif
+
 		walk->mm_stats[MM_NONLEAF_TOTAL]++;
 
 		if (should_clear_pmd_young()) {