Message ID | 372d1488c35dca1c5af04f95d9b8548ea07ea603.1638976229.git.christophe.leroy@csgroup.eu (mailing list archive)
---|---
State | New
Series | Convert powerpc to default topdown mmap layout
Excerpts from Christophe Leroy's message of December 9, 2021 3:18 am:
> vma_mmu_pagesize() is only required for slices,
> otherwise there is a generic weak version doing the
> exact same thing.
>
> Move it to slice.c
>
> Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> ---
>  arch/powerpc/mm/hugetlbpage.c | 11 -----------
>  arch/powerpc/mm/slice.c       |  9 +++++++++
>  2 files changed, 9 insertions(+), 11 deletions(-)
>
> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
> index ddead41e2194..0eec3b61bd13 100644
> --- a/arch/powerpc/mm/hugetlbpage.c
> +++ b/arch/powerpc/mm/hugetlbpage.c
> @@ -565,17 +565,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
>  }
>  #endif
>
> -unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
> -{
> -	/* With radix we don't use slice, so derive it from vma*/
> -	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
> -		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
> -
> -		return 1UL << mmu_psize_to_shift(psize);
> -	}
> -	return vma_kernel_pagesize(vma);
> -}
> -
>  bool __init arch_hugetlb_valid_size(unsigned long size)
>  {
>  	int shift = __ffs(size);
> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
> index f42711f865f3..8a3ac062b71e 100644
> --- a/arch/powerpc/mm/slice.c
> +++ b/arch/powerpc/mm/slice.c
> @@ -759,4 +759,13 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
>
>  	return !slice_check_range_fits(mm, maskp, addr, len);
>  }
> +
> +unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
> +{
> +	/* With radix we don't use slice, so derive it from vma*/
> +	if (radix_enabled())
> +		return vma_kernel_pagesize(vma);
> +
> +	return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
> +}
>  #endif
> --
> 2.33.1
>
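For reference (not part of the patch): the "generic weak version" mentioned above is the one in mm/hugetlb.c, which applies whenever no architecture supplies a strong override. Quoted from memory, so treat the comment wording as approximate, it looks roughly like this:

```c
/* mm/hugetlb.c (generic): architectures whose MMU page size can differ from
 * the kernel page size, such as hash-MMU powerpc with slices, override this
 * weak symbol with a strong definition.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
```

Since slice.c is only built when CONFIG_PPC_MM_SLICES is selected, the IS_ENABLED(CONFIG_PPC_MM_SLICES) test in the old hugetlbpage.c copy becomes redundant after the move, which is why the version added to slice.c only checks radix_enabled().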
```diff
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index ddead41e2194..0eec3b61bd13 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -565,17 +565,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif
 
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
-	/* With radix we don't use slice, so derive it from vma*/
-	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
-		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
-
-		return 1UL << mmu_psize_to_shift(psize);
-	}
-	return vma_kernel_pagesize(vma);
-}
-
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
 	int shift = __ffs(size);
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index f42711f865f3..8a3ac062b71e 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -759,4 +759,13 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 
 	return !slice_check_range_fits(mm, maskp, addr, len);
 }
+
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+	/* With radix we don't use slice, so derive it from vma*/
+	if (radix_enabled())
+		return vma_kernel_pagesize(vma);
+
+	return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
+}
 #endif
```
vma_mmu_pagesize() is only required for slices, otherwise there is a
generic weak version doing the exact same thing.

Move it to slice.c

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/mm/hugetlbpage.c | 11 -----------
 arch/powerpc/mm/slice.c       |  9 +++++++++
 2 files changed, 9 insertions(+), 11 deletions(-)
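For context (not part of the patch): the main user-visible consumer of vma_mmu_pagesize() is the MMUPageSize field of /proc/<pid>/smaps in fs/proc/task_mmu.c. A simplified, hypothetical sketch of that consumer (the helper name and exact formatting below are illustrative, not the verbatim kernel code):

```c
/* Hypothetical, simplified sketch of the smaps consumer: /proc/<pid>/smaps
 * derives its MMUPageSize line from vma_mmu_pagesize(), which is what the
 * powerpc override being moved here ultimately feeds.
 */
static void show_mmu_page_size(struct seq_file *m, struct vm_area_struct *vma)
{
	seq_printf(m, "MMUPageSize:    %8lu kB\n", vma_mmu_pagesize(vma) >> 10);
}
```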