Message ID | cover.1720755677.git.baolin.wang@linux.alibaba.com
---|---
Series | Some cleanups for shmem
On Sat, 13 Jul 2024 21:24:19 +0800 Baolin Wang <baolin.wang@linux.alibaba.com> wrote:

> Changes from v1:
>  - Add a dummy function in case CONFIG_TRANSPARENT_HUGEPAGE is not
>    enabled, which fixes a building error reported by kernel test robot.

The only difference I'm seeing from the v1 series is the below update
to [3/3]:

--- a/mm/shmem.c~mm-shmem-move-shmem_huge_global_enabled-into-shmem_allowable_huge_orders-v2
+++ a/mm/shmem.c
@@ -549,10 +549,9 @@ static bool shmem_confirm_swap(struct ad
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
 static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-					bool shmem_huge_force, struct vm_area_struct *vma,
+					bool shmem_huge_force, struct mm_struct *mm,
 					unsigned long vm_flags)
 {
-	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
 	loff_t i_size;
 
 	if (!S_ISREG(inode->i_mode))
@@ -583,14 +582,14 @@ static bool __shmem_huge_global_enabled(
 }
 
 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-					bool shmem_huge_force, struct vm_area_struct *vma,
+					bool shmem_huge_force, struct mm_struct *mm,
 					unsigned long vm_flags)
 {
 	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
 		return false;
 
 	return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
-					   vma, vm_flags);
+					   mm, vm_flags);
 }
 
 #if defined(CONFIG_SYSFS)
@@ -775,7 +774,7 @@ static unsigned long shmem_unused_huge_s
 }
 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-					bool shmem_huge_force, struct vm_area_struct *vma,
+					bool shmem_huge_force, struct mm_struct *mm,
 					unsigned long vm_flags)
 {
 	return false;
 }
@@ -1638,6 +1637,7 @@ unsigned long shmem_allowable_huge_order
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
+	struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
 	/*
 	 * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that
 	 * are enabled for this vma.
@@ -1656,7 +1656,7 @@ unsigned long shmem_allowable_huge_order
 		return 0;
 
 	global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
-						vma, vm_flags);
+						fault_mm, vm_flags);
 	if (!vma || !vma_is_anon_shmem(vma)) {
 		/*
 		 * For tmpfs, we now only support PMD sized THP if huge page
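For readers following the changelog, the "dummy function" it mentions is the usual kernel pattern of providing a no-op fallback when a config option is compiled out, so callers build without sprinkling #ifdefs at every call site. The sketch below only illustrates that pattern with simplified, stand-in signatures; it is not the exact mm/shmem.c code (the real stub, visible in the diff above, takes the inode, index, shmem_huge_force, the faulting mm and vm_flags).

```c
/* Illustrative sketch only -- a simplified stand-in, not the mm/shmem.c stub. */
#include <stdbool.h>
#include <stdio.h>

struct mm_struct;	/* opaque here; the real definition lives in the kernel */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Real policy checks would live here when THP support is built in. */
static bool shmem_huge_global_enabled(struct mm_struct *mm, unsigned long vm_flags)
{
	(void)mm;
	(void)vm_flags;
	return true;	/* placeholder for the real checks */
}
#else
/* Dummy fallback: keeps callers compiling when THP support is compiled out. */
static inline bool shmem_huge_global_enabled(struct mm_struct *mm, unsigned long vm_flags)
{
	(void)mm;
	(void)vm_flags;
	return false;
}
#endif

int main(void)
{
	/* Built without -DCONFIG_TRANSPARENT_HUGEPAGE, the stub returns false. */
	printf("huge pages globally enabled: %d\n",
	       shmem_huge_global_enabled(NULL, 0));
	return 0;
}
```

Independently of the stub, the v2 delta above also narrows the helper's parameter from a struct vm_area_struct * to the struct mm_struct * it actually needs, with the caller deriving fault_mm from the vma.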
On Wed, 24 Jul 2024 12:14:07 -0700 Andrew Morton <akpm@linux-foundation.org> wrote:

> The only difference I'm seeing from the v1 series is the below update
> to [3/3]:

oop. sorry, never mind.