Message ID | 20210217163102.13436-2-peterx@redhat.com (mailing list archive)
---|---
State      | New, archived
Series     | hugetlb: Disable huge pmd unshare for uffd-wp
Hi Peter,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on arm64/for-next/core]
[also build test ERROR on linux/master linus/master hnaz-linux-mm/master v5.11 next-20210217]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:      https://github.com/0day-ci/linux/commits/Peter-Xu/hugetlb-Disable-huge-pmd-unshare-for-uffd-wp/20210218-003520
base:     https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-next/core
config:   sparc-allyesconfig (attached as .config)
compiler: sparc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/58ca9ac367f24752915f352fa9ae1ae027a29de3
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Peter-Xu/hugetlb-Disable-huge-pmd-unshare-for-uffd-wp/20210218-003520
        git checkout 58ca9ac367f24752915f352fa9ae1ae027a29de3
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=sparc

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> arch/sparc/mm/hugetlbpage.c:285:1: error: expected ';', ',' or ')' before '{' token
     285 | {
         | ^
>> arch/sparc/mm/hugetlbpage.c:269:22: error: 'huge_tte_to_size' defined but not used [-Werror=unused-function]
     269 | static unsigned long huge_tte_to_size(pte_t pte)
         |                      ^~~~~~~~~~~~~~~~
   cc1: all warnings being treated as errors


vim +285 arch/sparc/mm/hugetlbpage.c

c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  268  
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01 @269  static unsigned long huge_tte_to_size(pte_t pte)
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  270  {
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  271  	unsigned long size = 1UL << huge_tte_to_shift(pte);
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  272  
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  273  	if (size == REAL_HPAGE_SIZE)
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  274  		size = HPAGE_SIZE;
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  275  	return size;
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  276  }
c7d9f77d33a779 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-01  277  
e6e4f42eb773c1 arch/sparc/mm/hugetlbpage.c   Peter Zijlstra  2020-11-13  278  unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
e6e4f42eb773c1 arch/sparc/mm/hugetlbpage.c   Peter Zijlstra  2020-11-13  279  unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
e6e4f42eb773c1 arch/sparc/mm/hugetlbpage.c   Peter Zijlstra  2020-11-13  280  unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
e6e4f42eb773c1 arch/sparc/mm/hugetlbpage.c   Peter Zijlstra  2020-11-13  281  
a5516438959d90 arch/sparc64/mm/hugetlbpage.c Andi Kleen      2008-07-23  282  pte_t *huge_pte_alloc(struct mm_struct *mm,
58ca9ac367f247 arch/sparc/mm/hugetlbpage.c   Peter Xu        2021-02-17  283  pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
a5516438959d90 arch/sparc64/mm/hugetlbpage.c Andi Kleen      2008-07-23  284  			unsigned long addr, unsigned long sz)
^1da177e4c3f41 arch/sparc64/mm/hugetlbpage.c Linus Torvalds  2005-04-16 @285  {
^1da177e4c3f41 arch/sparc64/mm/hugetlbpage.c Linus Torvalds  2005-04-16  286  	pgd_t *pgd;
5637bc50483404 arch/sparc/mm/hugetlbpage.c   Mike Rapoport   2019-11-24  287  	p4d_t *p4d;
^1da177e4c3f41 arch/sparc64/mm/hugetlbpage.c Linus Torvalds  2005-04-16  288  	pud_t *pud;
dcd1912d21a025 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-06  289  	pmd_t *pmd;
^1da177e4c3f41 arch/sparc64/mm/hugetlbpage.c Linus Torvalds  2005-04-16  290  
^1da177e4c3f41 arch/sparc64/mm/hugetlbpage.c Linus Torvalds  2005-04-16  291  	pgd = pgd_offset(mm, addr);
5637bc50483404 arch/sparc/mm/hugetlbpage.c   Mike Rapoport   2019-11-24  292  	p4d = p4d_offset(pgd, addr);
5637bc50483404 arch/sparc/mm/hugetlbpage.c   Mike Rapoport   2019-11-24  293  	pud = pud_alloc(mm, p4d, addr);
df7b2155bbe755 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-08-11  294  	if (!pud)
df7b2155bbe755 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-08-11  295  		return NULL;
df7b2155bbe755 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-08-11  296  	if (sz >= PUD_SIZE)
4dbe87d5a70215 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-08-11  297  		return (pte_t *)pud;
dcd1912d21a025 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-06  298  	pmd = pmd_alloc(mm, pud, addr);
dcd1912d21a025 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-06  299  	if (!pmd)
dcd1912d21a025 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-02-06  300  		return NULL;
59f1183dd368f1 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-03-03  301  	if (sz >= PMD_SIZE)
4dbe87d5a70215 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-08-11  302  		return (pte_t *)pmd;
4dbe87d5a70215 arch/sparc/mm/hugetlbpage.c   Nitin Gupta     2017-08-11  303  	return pte_alloc_map(mm, pmd, addr);
^1da177e4c3f41 arch/sparc64/mm/hugetlbpage.c Linus Torvalds  2005-04-16  304  }
^1da177e4c3f41 arch/sparc64/mm/hugetlbpage.c Linus Torvalds  2005-04-16  305  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
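From the excerpt above, lines 282-283 carry two back-to-back huge_pte_alloc() prototypes: the sparc hunk adds the new signature without turning the pre-existing one into a removal, which is what the parser trips over at line 285 (and why huge_tte_to_size() then appears unused). Assuming the intent is only to add the vma argument, a minimal corrective hunk would presumably look like the sketch below; this is an illustrative, untested reconstruction, not part of the posted patch.

--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -280,6 +280,6 @@ unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&p
 unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;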
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 55ecf6de9ff7..6e3bcffe2837 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 	set_pte(ptep, pte);
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgdp;
@@ -286,7 +286,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	} else if (sz == PMD_SIZE) {
 		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
 		    pud_none(READ_ONCE(*pudp)))
-			ptep = huge_pmd_share(mm, addr, pudp);
+			ptep = huge_pmd_share(mm, vma, addr, pudp);
 		else
 			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
 	} else if (sz == (CONT_PMD_SIZE)) {
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index b331f94d20ac..f993cb36c062 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
 EXPORT_SYMBOL(hpage_shift);
 
 pte_t *
-huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+	       unsigned long addr, unsigned long sz)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index b9f76f433617..7eaff5b07873 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -21,8 +21,8 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
-		      unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d7ba014a7fbb..e141441bfa64 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8b3cc4d688e8..d57276b8791c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
  * At this point we do the placement change only for BOOK3S 64. This would
  * possibly work on other subarchs.
  */
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
 	p4d_t *p4;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 3b5a4d25ca9b..da36d13ffc16 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgdp;
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 220d7bc43d2b..999ab5916e69 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -21,7 +21,7 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ad4b42f04988..97e0824fdbe7 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -280,6 +280,7 @@ unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&p
 unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b5807f23caf8..a6113fa6d21d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -152,7 +152,8 @@ void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pud_t *pud);
 
 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 
@@ -161,7 +162,7 @@ extern struct list_head huge_boot_pages;
 
 /* arch callbacks */
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm,
 		       unsigned long addr, unsigned long sz);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4bdb58ab14cb..07bb9bdc3282 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3807,7 +3807,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
-		dst_pte = huge_pte_alloc(dst, addr, sz);
+		dst_pte = huge_pte_alloc(dst, vma, addr, sz);
 		if (!dst_pte) {
 			ret = -ENOMEM;
 			break;
@@ -4544,7 +4544,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	mapping = vma->vm_file->f_mapping;
 	i_mmap_lock_read(mapping);
-	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
 	if (!ptep) {
 		i_mmap_unlock_read(mapping);
 		return VM_FAULT_OOM;
@@ -5334,9 +5334,9 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
  * only required for subsequent processing.
  */
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pud_t *pud)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
@@ -5414,7 +5414,8 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 #define want_pmd_share() (1)
 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct vma,
+		      unsigned long addr, pud_t *pud)
 {
 	return NULL;
 }
@@ -5433,7 +5434,7 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
@@ -5452,7 +5453,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	} else {
 		BUG_ON(sz != PMD_SIZE);
 		if (want_pmd_share() && pud_none(*pud))
-			pte = huge_pmd_share(mm, addr, pud);
+			pte = huge_pmd_share(mm, vma, addr, pud);
 		else
 			pte = (pte_t *)pmd_alloc(mm, pud, addr);
 	}
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 9a3d451402d7..063cbb17e8d8 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -290,7 +290,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 		err = -ENOMEM;
-		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
+		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
 		if (!dst_pte) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 			i_mmap_unlock_read(mapping);
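For readers following the interface change rather than any single hunk: every updated call site above already holds the VMA, so it is handed down explicitly instead of huge_pmd_share() re-deriving it with find_vma(). A minimal sketch of the resulting calling convention is below; alloc_huge_pte_for_vma() is a hypothetical helper used only for illustration and is not part of the patch.

#include <linux/hugetlb.h>
#include <linux/mm.h>

/*
 * Hypothetical wrapper showing the new convention: the caller passes the
 * vma it already holds, so the shared-PMD path can inspect the VMA
 * directly rather than looking it up by address.
 */
static pte_t *alloc_huge_pte_for_vma(struct vm_area_struct *vma,
				     unsigned long haddr, unsigned long sz)
{
	/* was: huge_pte_alloc(vma->vm_mm, haddr, sz) */
	return huge_pte_alloc(vma->vm_mm, vma, haddr, sz);
}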