Message ID | 20220404200250.321455-7-shy828301@gmail.com |
---|---|
State | New |
Series | Make khugepaged collapse readonly FS THP more consistent |
On 4/4/22 22:02, Yang Shi wrote:
> To reuse hugepage_vma_check() for khugepaged_enter() so that we could
> remove some duplicate code. But moving hugepage_vma_check() to
> khugepaged.h needs to include huge_mm.h in it, it seems not optimal to
> bloat khugepaged.h.
>
> And the khugepaged_* functions actually are wrappers for some non-inline
> functions, so it seems the benefits are not too much to keep them inline.
>
> So move the khugepaged_* functions to khugepaged.c, any callers just
> need to include khugepaged.h which is quite small. For example, the
> following patches will call khugepaged_enter() in filemap page fault path
> for regular filesystems to make readonly FS THP collapse more consistent.
> The filemap.c just needs to include khugepaged.h.

This last part is inaccurate in v3?

> Acked-by: Song Liu <song@kernel.org>
> Signed-off-by: Yang Shi <shy828301@gmail.com>

I think moving the tiny wrappers is unnecessary.

How about just making hugepage_vma_check() not static and declare it in
khugepaged.h, then it can be used from khugepaged_enter() in the same file
and AFAICS no need to include huge_mm.h there?

> ---
>  include/linux/khugepaged.h | 37 ++++++-------------------------------
>  mm/khugepaged.c            | 20 ++++++++++++++++++++
>  2 files changed, 26 insertions(+), 31 deletions(-)
>
> diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
> index 0423d3619f26..6acf9701151e 100644
> --- a/include/linux/khugepaged.h
> +++ b/include/linux/khugepaged.h
> @@ -2,10 +2,6 @@
>  #ifndef _LINUX_KHUGEPAGED_H
>  #define _LINUX_KHUGEPAGED_H
>
> -#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
> -#include <linux/shmem_fs.h>
> -
> -
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  extern struct attribute_group khugepaged_attr_group;
>
> @@ -16,6 +12,12 @@ extern void __khugepaged_enter(struct mm_struct *mm);
>  extern void __khugepaged_exit(struct mm_struct *mm);
>  extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
>                                         unsigned long vm_flags);
> +extern void khugepaged_fork(struct mm_struct *mm,
> +                            struct mm_struct *oldmm);
> +extern void khugepaged_exit(struct mm_struct *mm);
> +extern void khugepaged_enter(struct vm_area_struct *vma,
> +                             unsigned long vm_flags);
> +
>  extern void khugepaged_min_free_kbytes_update(void);
>  #ifdef CONFIG_SHMEM
>  extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
> @@ -33,36 +35,9 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
>  #define khugepaged_always()                             \
>          (transparent_hugepage_flags &                   \
>           (1<<TRANSPARENT_HUGEPAGE_FLAG))
> -#define khugepaged_req_madv()                           \
> -        (transparent_hugepage_flags &                   \
> -         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
>  #define khugepaged_defrag()                             \
>          (transparent_hugepage_flags &                   \
>           (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
> -
> -static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
> -{
> -        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
> -                __khugepaged_enter(mm);
> -}
> -
> -static inline void khugepaged_exit(struct mm_struct *mm)
> -{
> -        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
> -                __khugepaged_exit(mm);
> -}
> -
> -static inline void khugepaged_enter(struct vm_area_struct *vma,
> -                                    unsigned long vm_flags)
> -{
> -        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
> -                if ((khugepaged_always() ||
> -                     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
> -                     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
> -                    !(vm_flags & VM_NOHUGEPAGE) &&
> -                    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> -                        __khugepaged_enter(vma->vm_mm);
> -}
>  #else /* CONFIG_TRANSPARENT_HUGEPAGE */
>  static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
>  {
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index b69eda934d70..ec5b0a691d87 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -556,6 +556,26 @@ void __khugepaged_exit(struct mm_struct *mm)
>          }
>  }
>
> +void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
> +{
> +        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
> +                __khugepaged_enter(mm);
> +}
> +
> +void khugepaged_exit(struct mm_struct *mm)
> +{
> +        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
> +                __khugepaged_exit(mm);
> +}
> +
> +void khugepaged_enter(struct vm_area_struct *vma, unsigned long vm_flags)
> +{
> +        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
> +            khugepaged_enabled())
> +                if (hugepage_vma_check(vma, vm_flags))
> +                        __khugepaged_enter(vma->vm_mm);
> +}
> +
>  static void release_pte_page(struct page *page)
>  {
>          mod_node_page_state(page_pgdat(page),
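For concreteness, the alternative being suggested could look roughly like the sketch below, assuming hugepage_vma_check() keeps the bool (vma, vm_flags) signature it has in mm/khugepaged.c at this point, and that khugepaged.h keeps its sched/coredump.h include for MMF_VM_HUGEPAGE. This is an illustration of the idea, not a patch posted in the thread:

/* include/linux/khugepaged.h -- sketch: export hugepage_vma_check()
 * from mm/khugepaged.c (drop its "static") instead of moving the
 * wrappers out of line; the inline khugepaged_enter() then reuses it,
 * and khugepaged.h never needs to include huge_mm.h. */
extern bool hugepage_vma_check(struct vm_area_struct *vma,
                               unsigned long vm_flags);

static inline void khugepaged_enter(struct vm_area_struct *vma,
                                    unsigned long vm_flags)
{
        /* Cheap bit test first: skip mms khugepaged already tracks. */
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
            khugepaged_enabled() &&
            hugepage_vma_check(vma, vm_flags))
                __khugepaged_enter(vma->vm_mm);
}

This keeps khugepaged_fork()/khugepaged_exit() as trivial inlines and still removes the duplicated VMA-eligibility logic, since the single out-of-line hugepage_vma_check() becomes the one source of truth.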
On Mon, May 9, 2022 at 8:31 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 4/4/22 22:02, Yang Shi wrote:
> > To reuse hugepage_vma_check() for khugepaged_enter() so that we could
> > remove some duplicate code. But moving hugepage_vma_check() to
> > khugepaged.h needs to include huge_mm.h in it, it seems not optimal to
> > bloat khugepaged.h.
> >
> > And the khugepaged_* functions actually are wrappers for some non-inline
> > functions, so it seems the benefits are not too much to keep them inline.
> >
> > So move the khugepaged_* functions to khugepaged.c, any callers just
> > need to include khugepaged.h which is quite small. For example, the
> > following patches will call khugepaged_enter() in filemap page fault path
> > for regular filesystems to make readonly FS THP collapse more consistent.
> > The filemap.c just needs to include khugepaged.h.
>
> This last part is inaccurate in v3?

Yeah, thanks for catching this. Since the patch will be reworked, the
commit log will be reworked as well.

> > Acked-by: Song Liu <song@kernel.org>
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
>
> I think moving the tiny wrappers is unnecessary.
>
> How about just making hugepage_vma_check() not static and declare it in
> khugepaged.h, then it can be used from khugepaged_enter() in the same file
> and AFAICS no need to include huge_mm.h there?

Sounds good to me, will fix it in v4. Thanks for reviewing and acking
the series.

> > ---
> >  include/linux/khugepaged.h | 37 ++++++-------------------------------
> >  mm/khugepaged.c            | 20 ++++++++++++++++++++
> >  2 files changed, 26 insertions(+), 31 deletions(-)
> >
> > diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
> > index 0423d3619f26..6acf9701151e 100644
> > --- a/include/linux/khugepaged.h
> > +++ b/include/linux/khugepaged.h
> > @@ -2,10 +2,6 @@
> >  #ifndef _LINUX_KHUGEPAGED_H
> >  #define _LINUX_KHUGEPAGED_H
> >
> > -#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
> > -#include <linux/shmem_fs.h>
> > -
> > -
> >  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> >  extern struct attribute_group khugepaged_attr_group;
> >
> > @@ -16,6 +12,12 @@ extern void __khugepaged_enter(struct mm_struct *mm);
> >  extern void __khugepaged_exit(struct mm_struct *mm);
> >  extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
> >                                         unsigned long vm_flags);
> > +extern void khugepaged_fork(struct mm_struct *mm,
> > +                            struct mm_struct *oldmm);
> > +extern void khugepaged_exit(struct mm_struct *mm);
> > +extern void khugepaged_enter(struct vm_area_struct *vma,
> > +                             unsigned long vm_flags);
> > +
> >  extern void khugepaged_min_free_kbytes_update(void);
> >  #ifdef CONFIG_SHMEM
> >  extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
> > @@ -33,36 +35,9 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
> >  #define khugepaged_always()                             \
> >          (transparent_hugepage_flags &                   \
> >           (1<<TRANSPARENT_HUGEPAGE_FLAG))
> > -#define khugepaged_req_madv()                           \
> > -        (transparent_hugepage_flags &                   \
> > -         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
> >  #define khugepaged_defrag()                             \
> >          (transparent_hugepage_flags &                   \
> >           (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
> > -
> > -static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
> > -{
> > -        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
> > -                __khugepaged_enter(mm);
> > -}
> > -
> > -static inline void khugepaged_exit(struct mm_struct *mm)
> > -{
> > -        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
> > -                __khugepaged_exit(mm);
> > -}
> > -
> > -static inline void khugepaged_enter(struct vm_area_struct *vma,
> > -                                    unsigned long vm_flags)
> > -{
> > -        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
> > -                if ((khugepaged_always() ||
> > -                     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
> > -                     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
> > -                    !(vm_flags & VM_NOHUGEPAGE) &&
> > -                    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> > -                        __khugepaged_enter(vma->vm_mm);
> > -}
> >  #else /* CONFIG_TRANSPARENT_HUGEPAGE */
> >  static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
> >  {
> > diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> > index b69eda934d70..ec5b0a691d87 100644
> > --- a/mm/khugepaged.c
> > +++ b/mm/khugepaged.c
> > @@ -556,6 +556,26 @@ void __khugepaged_exit(struct mm_struct *mm)
> >          }
> >  }
> >
> > +void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
> > +{
> > +        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
> > +                __khugepaged_enter(mm);
> > +}
> > +
> > +void khugepaged_exit(struct mm_struct *mm)
> > +{
> > +        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
> > +                __khugepaged_exit(mm);
> > +}
> > +
> > +void khugepaged_enter(struct vm_area_struct *vma, unsigned long vm_flags)
> > +{
> > +        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
> > +            khugepaged_enabled())
> > +                if (hugepage_vma_check(vma, vm_flags))
> > +                        __khugepaged_enter(vma->vm_mm);
> > +}
> > +
> >  static void release_pte_page(struct page *page)
> >  {
> >          mod_node_page_state(page_pgdat(page),
>
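The series' stated end goal explains why the small header matters: a regular filesystem's fault path should be able to hint khugepaged without pulling in THP internals. A hypothetical fragment along those lines (the helper name and hook point are illustrative, not taken from the series):

#include <linux/mm.h>
#include <linux/khugepaged.h>   /* small header; no huge_mm.h or shmem_fs.h */

/* Hypothetical helper: after a file-backed fault has mapped pages,
 * register the mm so khugepaged may later collapse the range into a
 * huge page. khugepaged_enter() is effectively a no-op unless THP is
 * enabled and the VMA passes hugepage_vma_check(). */
static void hint_khugepaged(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;

        khugepaged_enter(vma, vma->vm_flags);
}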
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 0423d3619f26..6acf9701151e 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -2,10 +2,6 @@
 #ifndef _LINUX_KHUGEPAGED_H
 #define _LINUX_KHUGEPAGED_H
 
-#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-#include <linux/shmem_fs.h>
-
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern struct attribute_group khugepaged_attr_group;
 
@@ -16,6 +12,12 @@ extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                                        unsigned long vm_flags);
+extern void khugepaged_fork(struct mm_struct *mm,
+                            struct mm_struct *oldmm);
+extern void khugepaged_exit(struct mm_struct *mm);
+extern void khugepaged_enter(struct vm_area_struct *vma,
+                             unsigned long vm_flags);
+
 extern void khugepaged_min_free_kbytes_update(void);
 #ifdef CONFIG_SHMEM
 extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
@@ -33,36 +35,9 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
 #define khugepaged_always()                             \
         (transparent_hugepage_flags &                   \
          (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv()                           \
-        (transparent_hugepage_flags &                   \
-         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
 #define khugepaged_defrag()                             \
         (transparent_hugepage_flags &                   \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
-
-static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-{
-        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
-                __khugepaged_enter(mm);
-}
-
-static inline void khugepaged_exit(struct mm_struct *mm)
-{
-        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
-                __khugepaged_exit(mm);
-}
-
-static inline void khugepaged_enter(struct vm_area_struct *vma,
-                                    unsigned long vm_flags)
-{
-        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
-                if ((khugepaged_always() ||
-                     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
-                     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
-                    !(vm_flags & VM_NOHUGEPAGE) &&
-                    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-                        __khugepaged_enter(vma->vm_mm);
-}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b69eda934d70..ec5b0a691d87 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -556,6 +556,26 @@ void __khugepaged_exit(struct mm_struct *mm)
         }
 }
 
+void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+                __khugepaged_enter(mm);
+}
+
+void khugepaged_exit(struct mm_struct *mm)
+{
+        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+                __khugepaged_exit(mm);
+}
+
+void khugepaged_enter(struct vm_area_struct *vma, unsigned long vm_flags)
+{
+        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+            khugepaged_enabled())
+                if (hugepage_vma_check(vma, vm_flags))
+                        __khugepaged_enter(vma->vm_mm);
+}
+
 static void release_pte_page(struct page *page)
 {
         mod_node_page_state(page_pgdat(page),
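Reading the new out-of-line khugepaged_enter() against the inline it replaces: the open-coded condition is split between the existing khugepaged_enabled() macro and hugepage_vma_check(). A sketch of the resulting predicate, annotated with where each old check lands (helper names per the mm code of this era; approximate, not an equivalence proof, and would_enter() is a hypothetical name for illustration only):

/* Sketch: the predicate the new khugepaged_enter() evaluates. */
static inline bool would_enter(struct vm_area_struct *vma,
                               unsigned long vm_flags)
{
        return !test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
               /* was: khugepaged_always() || khugepaged_req_madv();
                * khugepaged_enabled() tests both sysfs flags at once */
               khugepaged_enabled() &&
               /* hugepage_vma_check() folds in the rest:
                *  - VM_NOHUGEPAGE / MMF_DISABLE_THP rejection
                *  - shmem_file() && shmem_huge_enabled() for shmem VMAs
                *  - the "THP settings require madvise" VM_HUGEPAGE test */
               hugepage_vma_check(vma, vm_flags);
}

This is also why khugepaged_req_madv() can be deleted from the header: its only user was the old inline, and the madvise-mode distinction now lives behind khugepaged_enabled() and hugepage_vma_check().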