[v6,10/15] mm/khugepaged: rename prefix of shared collapse functions

Message ID 20220604004004.954674-11-zokeefe@google.com (mailing list archive)
State New
Series mm: userspace hugepage collapse

Commit Message

Zach O'Keefe June 4, 2022, 12:39 a.m. UTC
The following functions/tracepoints are shared between khugepaged and
madvise collapse contexts.  Replace the "khugepaged_" prefix with
generic "hpage_collapse_" prefix in such cases:

khugepaged_test_exit() -> hpage_collapse_test_exit()
khugepaged_scan_abort() -> hpage_collapse_scan_abort()
khugepaged_scan_pmd() -> hpage_collapse_scan_pmd()
khugepaged_find_target_node() -> hpage_collapse_find_target_node()
khugepaged_alloc_page() -> hpage_collapse_alloc_page()
huge_memory:mm_khugepaged_scan_pmd ->
	huge_memory:mm_hpage_collapse_scan_pmd
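
To make the sharing concrete, here is a minimal, standalone userspace sketch
(not kernel code; the *_demo names and the example address are made up for
illustration) of the call pattern the new prefix reflects: both the khugepaged
daemon path and the madvise(MADV_COLLAPSE) path end up in the same scan
helper, so the helper's name should not imply a single caller.

#include <stdio.h>

/* Shared core, named for what it does rather than for one of its callers. */
static int hpage_collapse_scan_pmd_demo(unsigned long address)
{
	printf("scan PMD-sized range at %#lx\n", address);
	return 0;
}

/* Background-daemon context (khugepaged). */
static void khugepaged_scan_mm_slot_demo(void)
{
	hpage_collapse_scan_pmd_demo(0x200000);
}

/* Userspace-requested context (madvise(MADV_COLLAPSE)). */
static void madvise_collapse_demo(void)
{
	hpage_collapse_scan_pmd_demo(0x200000);
}

int main(void)
{
	khugepaged_scan_mm_slot_demo();
	madvise_collapse_demo();
	return 0;
}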

Signed-off-by: Zach O'Keefe <zokeefe@google.com>
---
 include/trace/events/huge_memory.h |  2 +-
 mm/khugepaged.c                    | 71 ++++++++++++++++--------------
 2 files changed, 38 insertions(+), 35 deletions(-)

Comments

Yang Shi June 6, 2022, 11:56 p.m. UTC | #1
On Fri, Jun 3, 2022 at 5:40 PM Zach O'Keefe <zokeefe@google.com> wrote:
>
> The following functions/tracepoints are shared between khugepaged and
> madvise collapse contexts.  Replace the "khugepaged_" prefix with
> generic "hpage_collapse_" prefix in such cases:
>
> khugepaged_test_exit() -> hpage_collapse_test_exit()
> khugepaged_scan_abort() -> hpage_collapse_scan_abort()
> khugepaged_scan_pmd() -> hpage_collapse_scan_pmd()
> khugepaged_find_target_node() -> hpage_collapse_find_target_node()
> khugepaged_alloc_page() -> hpage_collapse_alloc_page()
> huge_memory:mm_khugepaged_scan_pmd ->
>         huge_memory:mm_hpage_collapse_scan_pmd
>
> Signed-off-by: Zach O'Keefe <zokeefe@google.com>
> ---
>  include/trace/events/huge_memory.h |  2 +-
>  mm/khugepaged.c                    | 71 ++++++++++++++++--------------
>  2 files changed, 38 insertions(+), 35 deletions(-)
>
> diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
> index 55392bf30a03..fb6c73632ff3 100644
> --- a/include/trace/events/huge_memory.h
> +++ b/include/trace/events/huge_memory.h
> @@ -48,7 +48,7 @@ SCAN_STATUS
>  #define EM(a, b)       {a, b},
>  #define EMe(a, b)      {a, b}
>
> -TRACE_EVENT(mm_khugepaged_scan_pmd,
> +TRACE_EVENT(mm_hpage_collapse_scan_pmd,

You may not want to change the name of the tracepoint since it is
part of the kernel ABI. Otherwise the patch looks good to me.
Reviewed-by: Yang Shi <shy828301@gmail.com>

>
>         TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
>                  int referenced, int none_or_zero, int status, int unmapped),
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 073d6bb03b37..119c1bc84af7 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -102,7 +102,7 @@ struct collapse_control {
>         /* Num pages scanned per node */
>         int node_load[MAX_NUMNODES];
>
> -       /* Last target selected in khugepaged_find_target_node() */
> +       /* Last target selected in hpage_collapse_find_target_node() */
>         int last_target_node;
>
>         /* gfp used for allocation and memcg charging */
> @@ -456,7 +456,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
>         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
>  }
>
> -static inline int khugepaged_test_exit(struct mm_struct *mm)
> +static inline int hpage_collapse_test_exit(struct mm_struct *mm)
>  {
>         return atomic_read(&mm->mm_users) == 0;
>  }
> @@ -508,7 +508,7 @@ void __khugepaged_enter(struct mm_struct *mm)
>                 return;
>
>         /* __khugepaged_exit() must not run from under us */
> -       VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
> +       VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
>         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
>                 free_mm_slot(mm_slot);
>                 return;
> @@ -562,11 +562,10 @@ void __khugepaged_exit(struct mm_struct *mm)
>         } else if (mm_slot) {
>                 /*
>                  * This is required to serialize against
> -                * khugepaged_test_exit() (which is guaranteed to run
> -                * under mmap sem read mode). Stop here (after we
> -                * return all pagetables will be destroyed) until
> -                * khugepaged has finished working on the pagetables
> -                * under the mmap_lock.
> +                * hpage_collapse_test_exit() (which is guaranteed to run
> +                * under mmap sem read mode). Stop here (after we return all
> +                * pagetables will be destroyed) until khugepaged has finished
> +                * working on the pagetables under the mmap_lock.
>                  */
>                 mmap_write_lock(mm);
>                 mmap_write_unlock(mm);
> @@ -803,7 +802,7 @@ static void khugepaged_alloc_sleep(void)
>         remove_wait_queue(&khugepaged_wait, &wait);
>  }
>
> -static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
> +static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
>  {
>         int i;
>
> @@ -834,7 +833,7 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
>  }
>
>  #ifdef CONFIG_NUMA
> -static int khugepaged_find_target_node(struct collapse_control *cc)
> +static int hpage_collapse_find_target_node(struct collapse_control *cc)
>  {
>         int nid, target_node = 0, max_value = 0;
>
> @@ -858,7 +857,7 @@ static int khugepaged_find_target_node(struct collapse_control *cc)
>         return target_node;
>  }
>  #else
> -static int khugepaged_find_target_node(struct collapse_control *cc)
> +static int hpage_collapse_find_target_node(struct collapse_control *cc)
>  {
>         return 0;
>  }
> @@ -877,7 +876,7 @@ static bool alloc_fail_should_sleep(int result, bool *wait)
>         return false;
>  }
>
> -static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
> +static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
>  {
>         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
>         if (unlikely(!*hpage)) {
> @@ -905,7 +904,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>         unsigned long hstart, hend;
>         unsigned long vma_flags;
>
> -       if (unlikely(khugepaged_test_exit(mm)))
> +       if (unlikely(hpage_collapse_test_exit(mm)))
>                 return SCAN_ANY_PROCESS;
>
>         *vmap = vma = find_vma(mm, address);
> @@ -962,7 +961,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
>
>  /*
>   * Bring missing pages in from swap, to complete THP collapse.
> - * Only done if khugepaged_scan_pmd believes it is worthwhile.
> + * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
>   *
>   * Called and returns without pte mapped or spinlocks held,
>   * but with mmap_lock held to protect against vma changes.
> @@ -1027,9 +1026,9 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
>  static int alloc_charge_hpage(struct mm_struct *mm, struct page **hpage,
>                               struct collapse_control *cc)
>  {
> -       int node = khugepaged_find_target_node(cc);
> +       int node = hpage_collapse_find_target_node(cc);
>
> -       if (!khugepaged_alloc_page(hpage, cc->gfp, node))
> +       if (!hpage_collapse_alloc_page(hpage, cc->gfp, node))
>                 return SCAN_ALLOC_HUGE_PAGE_FAIL;
>         if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, cc->gfp)))
>                 return SCAN_CGROUP_CHARGE_FAIL;
> @@ -1188,9 +1187,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>         return result;
>  }
>
> -static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
> -                              unsigned long address, bool *mmap_locked,
> -                              struct collapse_control *cc)
> +static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> +                                  struct vm_area_struct *vma,
> +                                  unsigned long address, bool *mmap_locked,
> +                                  struct collapse_control *cc)
>  {
>         pmd_t *pmd;
>         pte_t *pte, *_pte;
> @@ -1282,7 +1282,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
>                  * hit record.
>                  */
>                 node = page_to_nid(page);
> -               if (khugepaged_scan_abort(node, cc)) {
> +               if (hpage_collapse_scan_abort(node, cc)) {
>                         result = SCAN_SCAN_ABORT;
>                         goto out_unmap;
>                 }
> @@ -1345,8 +1345,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
>                                             unmapped, cc);
>         }
>  out:
> -       trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
> -                                    none_or_zero, result, unmapped);
> +       trace_mm_hpage_collapse_scan_pmd(mm, page, writable, referenced,
> +                                        none_or_zero, result, unmapped);
>         return result;
>  }
>
> @@ -1356,7 +1356,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
>
>         lockdep_assert_held(&khugepaged_mm_lock);
>
> -       if (khugepaged_test_exit(mm)) {
> +       if (hpage_collapse_test_exit(mm)) {
>                 /* free mm_slot */
>                 hash_del(&mm_slot->hash);
>                 list_del(&mm_slot->mm_node);
> @@ -1530,7 +1530,7 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
>         if (!mmap_write_trylock(mm))
>                 return;
>
> -       if (unlikely(khugepaged_test_exit(mm)))
> +       if (unlikely(hpage_collapse_test_exit(mm)))
>                 goto out;
>
>         for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
> @@ -1593,7 +1593,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
>                          * it'll always mapped in small page size for uffd-wp
>                          * registered ranges.
>                          */
> -                       if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
> +                       if (!hpage_collapse_test_exit(mm) &&
> +                           !userfaultfd_wp(vma))
>                                 collapse_and_free_pmd(mm, vma, addr, pmd);
>                         mmap_write_unlock(mm);
>                 } else {
> @@ -2020,7 +2021,7 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
>                 }
>
>                 node = page_to_nid(page);
> -               if (khugepaged_scan_abort(node, cc)) {
> +               if (hpage_collapse_scan_abort(node, cc)) {
>                         result = SCAN_SCAN_ABORT;
>                         break;
>                 }
> @@ -2114,7 +2115,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                 goto breakouterloop_mmap_lock;
>
>         progress++;
> -       if (unlikely(khugepaged_test_exit(mm)))
> +       if (unlikely(hpage_collapse_test_exit(mm)))
>                 goto breakouterloop;
>
>         address = khugepaged_scan.address;
> @@ -2123,7 +2124,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                 unsigned long hstart, hend;
>
>                 cond_resched();
> -               if (unlikely(khugepaged_test_exit(mm))) {
> +               if (unlikely(hpage_collapse_test_exit(mm))) {
>                         progress++;
>                         break;
>                 }
> @@ -2148,7 +2149,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                         bool mmap_locked = true;
>
>                         cond_resched();
> -                       if (unlikely(khugepaged_test_exit(mm)))
> +                       if (unlikely(hpage_collapse_test_exit(mm)))
>                                 goto breakouterloop;
>
>                         /* reset gfp flags since sysfs settings might change */
> @@ -2168,9 +2169,10 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>                                                                cc);
>                                 fput(file);
>                         } else {
> -                               *result = khugepaged_scan_pmd(mm, vma,
> -                                                             khugepaged_scan.address,
> -                                                             &mmap_locked, cc);
> +                               *result = hpage_collapse_scan_pmd(mm, vma,
> +                                                                 khugepaged_scan.address,
> +                                                                 &mmap_locked,
> +                                                                 cc);
>                         }
>                         if (*result == SCAN_SUCCEED)
>                                 ++khugepaged_pages_collapsed;
> @@ -2200,7 +2202,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>          * Release the current mm_slot if this mm is about to die, or
>          * if we scanned all vmas of this mm.
>          */
> -       if (khugepaged_test_exit(mm) || !vma) {
> +       if (hpage_collapse_test_exit(mm) || !vma) {
>                 /*
>                  * Make sure that if mm_users is reaching zero while
>                  * khugepaged runs here, khugepaged_exit will find
> @@ -2482,7 +2484,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
>                 }
>                 mmap_assert_locked(mm);
>                 memset(cc.node_load, 0, sizeof(cc.node_load));
> -               result = khugepaged_scan_pmd(mm, vma, addr, &mmap_locked, &cc);
> +               result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked,
> +                                                &cc);
>                 if (!mmap_locked)
>                         *prev = NULL;  /* Tell caller we dropped mmap_lock */
>
> --
> 2.36.1.255.ge46751e96f-goog
>
Zach O'Keefe June 7, 2022, 12:31 a.m. UTC | #2
On Mon, Jun 6, 2022 at 4:56 PM Yang Shi <shy828301@gmail.com> wrote:
>
> On Fri, Jun 3, 2022 at 5:40 PM Zach O'Keefe <zokeefe@google.com> wrote:
> >
> > The following functions/tracepoints are shared between khugepaged and
> > madvise collapse contexts.  Replace the "khugepaged_" prefix with
> > generic "hpage_collapse_" prefix in such cases:
> >
> > khugepaged_test_exit() -> hpage_collapse_test_exit()
> > khugepaged_scan_abort() -> hpage_collapse_scan_abort()
> > khugepaged_scan_pmd() -> hpage_collapse_scan_pmd()
> > khugepaged_find_target_node() -> hpage_collapse_find_target_node()
> > khugepaged_alloc_page() -> hpage_collapse_alloc_page()
> > huge_memory:mm_khugepaged_scan_pmd ->
> >         huge_memory:mm_hpage_collapse_scan_pmd
> >
> > Signed-off-by: Zach O'Keefe <zokeefe@google.com>
> > ---
> >  include/trace/events/huge_memory.h |  2 +-
> >  mm/khugepaged.c                    | 71 ++++++++++++++++--------------
> >  2 files changed, 38 insertions(+), 35 deletions(-)
> >
> > diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
> > index 55392bf30a03..fb6c73632ff3 100644
> > --- a/include/trace/events/huge_memory.h
> > +++ b/include/trace/events/huge_memory.h
> > @@ -48,7 +48,7 @@ SCAN_STATUS
> >  #define EM(a, b)       {a, b},
> >  #define EMe(a, b)      {a, b}
> >
> > -TRACE_EVENT(mm_khugepaged_scan_pmd,
> > +TRACE_EVENT(mm_hpage_collapse_scan_pmd,
>
> You may not want to change the name of the tracepoint since it is
> part of the kernel ABI. Otherwise the patch looks good to me.
> Reviewed-by: Yang Shi <shy828301@gmail.com>

Thanks for the review, Yang. Yes, this is something I debated and was
unsure about. To err on the safe side, I'll remove the tracepoint
renaming. Thanks for voicing your concerns.

> >
> >         TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
> >                  int referenced, int none_or_zero, int status, int unmapped),
> > diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> > index 073d6bb03b37..119c1bc84af7 100644
> > --- a/mm/khugepaged.c
> > +++ b/mm/khugepaged.c
> > @@ -102,7 +102,7 @@ struct collapse_control {
> >         /* Num pages scanned per node */
> >         int node_load[MAX_NUMNODES];
> >
> > -       /* Last target selected in khugepaged_find_target_node() */
> > +       /* Last target selected in hpage_collapse_find_target_node() */
> >         int last_target_node;
> >
> >         /* gfp used for allocation and memcg charging */
> > @@ -456,7 +456,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
> >         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
> >  }
> >
> > -static inline int khugepaged_test_exit(struct mm_struct *mm)
> > +static inline int hpage_collapse_test_exit(struct mm_struct *mm)
> >  {
> >         return atomic_read(&mm->mm_users) == 0;
> >  }
> > @@ -508,7 +508,7 @@ void __khugepaged_enter(struct mm_struct *mm)
> >                 return;
> >
> >         /* __khugepaged_exit() must not run from under us */
> > -       VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
> > +       VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
> >         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
> >                 free_mm_slot(mm_slot);
> >                 return;
> > @@ -562,11 +562,10 @@ void __khugepaged_exit(struct mm_struct *mm)
> >         } else if (mm_slot) {
> >                 /*
> >                  * This is required to serialize against
> > -                * khugepaged_test_exit() (which is guaranteed to run
> > -                * under mmap sem read mode). Stop here (after we
> > -                * return all pagetables will be destroyed) until
> > -                * khugepaged has finished working on the pagetables
> > -                * under the mmap_lock.
> > +                * hpage_collapse_test_exit() (which is guaranteed to run
> > +                * under mmap sem read mode). Stop here (after we return all
> > +                * pagetables will be destroyed) until khugepaged has finished
> > +                * working on the pagetables under the mmap_lock.
> >                  */
> >                 mmap_write_lock(mm);
> >                 mmap_write_unlock(mm);
> > @@ -803,7 +802,7 @@ static void khugepaged_alloc_sleep(void)
> >         remove_wait_queue(&khugepaged_wait, &wait);
> >  }
> >
> > -static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
> > +static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
> >  {
> >         int i;
> >
> > @@ -834,7 +833,7 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
> >  }
> >
> >  #ifdef CONFIG_NUMA
> > -static int khugepaged_find_target_node(struct collapse_control *cc)
> > +static int hpage_collapse_find_target_node(struct collapse_control *cc)
> >  {
> >         int nid, target_node = 0, max_value = 0;
> >
> > @@ -858,7 +857,7 @@ static int khugepaged_find_target_node(struct collapse_control *cc)
> >         return target_node;
> >  }
> >  #else
> > -static int khugepaged_find_target_node(struct collapse_control *cc)
> > +static int hpage_collapse_find_target_node(struct collapse_control *cc)
> >  {
> >         return 0;
> >  }
> > @@ -877,7 +876,7 @@ static bool alloc_fail_should_sleep(int result, bool *wait)
> >         return false;
> >  }
> >
> > -static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
> > +static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
> >  {
> >         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
> >         if (unlikely(!*hpage)) {
> > @@ -905,7 +904,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
> >         unsigned long hstart, hend;
> >         unsigned long vma_flags;
> >
> > -       if (unlikely(khugepaged_test_exit(mm)))
> > +       if (unlikely(hpage_collapse_test_exit(mm)))
> >                 return SCAN_ANY_PROCESS;
> >
> >         *vmap = vma = find_vma(mm, address);
> > @@ -962,7 +961,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
> >
> >  /*
> >   * Bring missing pages in from swap, to complete THP collapse.
> > - * Only done if khugepaged_scan_pmd believes it is worthwhile.
> > + * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
> >   *
> >   * Called and returns without pte mapped or spinlocks held,
> >   * but with mmap_lock held to protect against vma changes.
> > @@ -1027,9 +1026,9 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
> >  static int alloc_charge_hpage(struct mm_struct *mm, struct page **hpage,
> >                               struct collapse_control *cc)
> >  {
> > -       int node = khugepaged_find_target_node(cc);
> > +       int node = hpage_collapse_find_target_node(cc);
> >
> > -       if (!khugepaged_alloc_page(hpage, cc->gfp, node))
> > +       if (!hpage_collapse_alloc_page(hpage, cc->gfp, node))
> >                 return SCAN_ALLOC_HUGE_PAGE_FAIL;
> >         if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, cc->gfp)))
> >                 return SCAN_CGROUP_CHARGE_FAIL;
> > @@ -1188,9 +1187,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> >         return result;
> >  }
> >
> > -static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
> > -                              unsigned long address, bool *mmap_locked,
> > -                              struct collapse_control *cc)
> > +static int hpage_collapse_scan_pmd(struct mm_struct *mm,
> > +                                  struct vm_area_struct *vma,
> > +                                  unsigned long address, bool *mmap_locked,
> > +                                  struct collapse_control *cc)
> >  {
> >         pmd_t *pmd;
> >         pte_t *pte, *_pte;
> > @@ -1282,7 +1282,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
> >                  * hit record.
> >                  */
> >                 node = page_to_nid(page);
> > -               if (khugepaged_scan_abort(node, cc)) {
> > +               if (hpage_collapse_scan_abort(node, cc)) {
> >                         result = SCAN_SCAN_ABORT;
> >                         goto out_unmap;
> >                 }
> > @@ -1345,8 +1345,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
> >                                             unmapped, cc);
> >         }
> >  out:
> > -       trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
> > -                                    none_or_zero, result, unmapped);
> > +       trace_mm_hpage_collapse_scan_pmd(mm, page, writable, referenced,
> > +                                        none_or_zero, result, unmapped);
> >         return result;
> >  }
> >
> > @@ -1356,7 +1356,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
> >
> >         lockdep_assert_held(&khugepaged_mm_lock);
> >
> > -       if (khugepaged_test_exit(mm)) {
> > +       if (hpage_collapse_test_exit(mm)) {
> >                 /* free mm_slot */
> >                 hash_del(&mm_slot->hash);
> >                 list_del(&mm_slot->mm_node);
> > @@ -1530,7 +1530,7 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
> >         if (!mmap_write_trylock(mm))
> >                 return;
> >
> > -       if (unlikely(khugepaged_test_exit(mm)))
> > +       if (unlikely(hpage_collapse_test_exit(mm)))
> >                 goto out;
> >
> >         for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
> > @@ -1593,7 +1593,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
> >                          * it'll always mapped in small page size for uffd-wp
> >                          * registered ranges.
> >                          */
> > -                       if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
> > +                       if (!hpage_collapse_test_exit(mm) &&
> > +                           !userfaultfd_wp(vma))
> >                                 collapse_and_free_pmd(mm, vma, addr, pmd);
> >                         mmap_write_unlock(mm);
> >                 } else {
> > @@ -2020,7 +2021,7 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
> >                 }
> >
> >                 node = page_to_nid(page);
> > -               if (khugepaged_scan_abort(node, cc)) {
> > +               if (hpage_collapse_scan_abort(node, cc)) {
> >                         result = SCAN_SCAN_ABORT;
> >                         break;
> >                 }
> > @@ -2114,7 +2115,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> >                 goto breakouterloop_mmap_lock;
> >
> >         progress++;
> > -       if (unlikely(khugepaged_test_exit(mm)))
> > +       if (unlikely(hpage_collapse_test_exit(mm)))
> >                 goto breakouterloop;
> >
> >         address = khugepaged_scan.address;
> > @@ -2123,7 +2124,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> >                 unsigned long hstart, hend;
> >
> >                 cond_resched();
> > -               if (unlikely(khugepaged_test_exit(mm))) {
> > +               if (unlikely(hpage_collapse_test_exit(mm))) {
> >                         progress++;
> >                         break;
> >                 }
> > @@ -2148,7 +2149,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> >                         bool mmap_locked = true;
> >
> >                         cond_resched();
> > -                       if (unlikely(khugepaged_test_exit(mm)))
> > +                       if (unlikely(hpage_collapse_test_exit(mm)))
> >                                 goto breakouterloop;
> >
> >                         /* reset gfp flags since sysfs settings might change */
> > @@ -2168,9 +2169,10 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> >                                                                cc);
> >                                 fput(file);
> >                         } else {
> > -                               *result = khugepaged_scan_pmd(mm, vma,
> > -                                                             khugepaged_scan.address,
> > -                                                             &mmap_locked, cc);
> > +                               *result = hpage_collapse_scan_pmd(mm, vma,
> > +                                                                 khugepaged_scan.address,
> > +                                                                 &mmap_locked,
> > +                                                                 cc);
> >                         }
> >                         if (*result == SCAN_SUCCEED)
> >                                 ++khugepaged_pages_collapsed;
> > @@ -2200,7 +2202,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> >          * Release the current mm_slot if this mm is about to die, or
> >          * if we scanned all vmas of this mm.
> >          */
> > -       if (khugepaged_test_exit(mm) || !vma) {
> > +       if (hpage_collapse_test_exit(mm) || !vma) {
> >                 /*
> >                  * Make sure that if mm_users is reaching zero while
> >                  * khugepaged runs here, khugepaged_exit will find
> > @@ -2482,7 +2484,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
> >                 }
> >                 mmap_assert_locked(mm);
> >                 memset(cc.node_load, 0, sizeof(cc.node_load));
> > -               result = khugepaged_scan_pmd(mm, vma, addr, &mmap_locked, &cc);
> > +               result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked,
> > +                                                &cc);
> >                 if (!mmap_locked)
> >                         *prev = NULL;  /* Tell caller we dropped mmap_lock */
> >
> > --
> > 2.36.1.255.ge46751e96f-goog
> >

Patch

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 55392bf30a03..fb6c73632ff3 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -48,7 +48,7 @@  SCAN_STATUS
 #define EM(a, b)	{a, b},
 #define EMe(a, b)	{a, b}
 
-TRACE_EVENT(mm_khugepaged_scan_pmd,
+TRACE_EVENT(mm_hpage_collapse_scan_pmd,
 
 	TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
 		 int referenced, int none_or_zero, int status, int unmapped),
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 073d6bb03b37..119c1bc84af7 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -102,7 +102,7 @@  struct collapse_control {
 	/* Num pages scanned per node */
 	int node_load[MAX_NUMNODES];
 
-	/* Last target selected in khugepaged_find_target_node() */
+	/* Last target selected in hpage_collapse_find_target_node() */
 	int last_target_node;
 
 	/* gfp used for allocation and memcg charging */
@@ -456,7 +456,7 @@  static void insert_to_mm_slots_hash(struct mm_struct *mm,
 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
-static inline int khugepaged_test_exit(struct mm_struct *mm)
+static inline int hpage_collapse_test_exit(struct mm_struct *mm)
 {
 	return atomic_read(&mm->mm_users) == 0;
 }
@@ -508,7 +508,7 @@  void __khugepaged_enter(struct mm_struct *mm)
 		return;
 
 	/* __khugepaged_exit() must not run from under us */
-	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 		free_mm_slot(mm_slot);
 		return;
@@ -562,11 +562,10 @@  void __khugepaged_exit(struct mm_struct *mm)
 	} else if (mm_slot) {
 		/*
 		 * This is required to serialize against
-		 * khugepaged_test_exit() (which is guaranteed to run
-		 * under mmap sem read mode). Stop here (after we
-		 * return all pagetables will be destroyed) until
-		 * khugepaged has finished working on the pagetables
-		 * under the mmap_lock.
+		 * hpage_collapse_test_exit() (which is guaranteed to run
+		 * under mmap sem read mode). Stop here (after we return all
+		 * pagetables will be destroyed) until khugepaged has finished
+		 * working on the pagetables under the mmap_lock.
 		 */
 		mmap_write_lock(mm);
 		mmap_write_unlock(mm);
@@ -803,7 +802,7 @@  static void khugepaged_alloc_sleep(void)
 	remove_wait_queue(&khugepaged_wait, &wait);
 }
 
-static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
+static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
 {
 	int i;
 
@@ -834,7 +833,7 @@  static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 }
 
 #ifdef CONFIG_NUMA
-static int khugepaged_find_target_node(struct collapse_control *cc)
+static int hpage_collapse_find_target_node(struct collapse_control *cc)
 {
 	int nid, target_node = 0, max_value = 0;
 
@@ -858,7 +857,7 @@  static int khugepaged_find_target_node(struct collapse_control *cc)
 	return target_node;
 }
 #else
-static int khugepaged_find_target_node(struct collapse_control *cc)
+static int hpage_collapse_find_target_node(struct collapse_control *cc)
 {
 	return 0;
 }
@@ -877,7 +876,7 @@  static bool alloc_fail_should_sleep(int result, bool *wait)
 	return false;
 }
 
-static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
+static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
@@ -905,7 +904,7 @@  static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 	unsigned long hstart, hend;
 	unsigned long vma_flags;
 
-	if (unlikely(khugepaged_test_exit(mm)))
+	if (unlikely(hpage_collapse_test_exit(mm)))
 		return SCAN_ANY_PROCESS;
 
 	*vmap = vma = find_vma(mm, address);
@@ -962,7 +961,7 @@  static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 
 /*
  * Bring missing pages in from swap, to complete THP collapse.
- * Only done if khugepaged_scan_pmd believes it is worthwhile.
+ * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
  *
  * Called and returns without pte mapped or spinlocks held,
  * but with mmap_lock held to protect against vma changes.
@@ -1027,9 +1026,9 @@  static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static int alloc_charge_hpage(struct mm_struct *mm, struct page **hpage,
 			      struct collapse_control *cc)
 {
-	int node = khugepaged_find_target_node(cc);
+	int node = hpage_collapse_find_target_node(cc);
 
-	if (!khugepaged_alloc_page(hpage, cc->gfp, node))
+	if (!hpage_collapse_alloc_page(hpage, cc->gfp, node))
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, cc->gfp)))
 		return SCAN_CGROUP_CHARGE_FAIL;
@@ -1188,9 +1187,10 @@  static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	return result;
 }
 
-static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
-			       unsigned long address, bool *mmap_locked,
-			       struct collapse_control *cc)
+static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+				   struct vm_area_struct *vma,
+				   unsigned long address, bool *mmap_locked,
+				   struct collapse_control *cc)
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
@@ -1282,7 +1282,7 @@  static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * hit record.
 		 */
 		node = page_to_nid(page);
-		if (khugepaged_scan_abort(node, cc)) {
+		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			goto out_unmap;
 		}
@@ -1345,8 +1345,8 @@  static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 					    unmapped, cc);
 	}
 out:
-	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
-				     none_or_zero, result, unmapped);
+	trace_mm_hpage_collapse_scan_pmd(mm, page, writable, referenced,
+					 none_or_zero, result, unmapped);
 	return result;
 }
 
@@ -1356,7 +1356,7 @@  static void collect_mm_slot(struct mm_slot *mm_slot)
 
 	lockdep_assert_held(&khugepaged_mm_lock);
 
-	if (khugepaged_test_exit(mm)) {
+	if (hpage_collapse_test_exit(mm)) {
 		/* free mm_slot */
 		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
@@ -1530,7 +1530,7 @@  static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
 	if (!mmap_write_trylock(mm))
 		return;
 
-	if (unlikely(khugepaged_test_exit(mm)))
+	if (unlikely(hpage_collapse_test_exit(mm)))
 		goto out;
 
 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
@@ -1593,7 +1593,8 @@  static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 			 * it'll always mapped in small page size for uffd-wp
 			 * registered ranges.
 			 */
-			if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
+			if (!hpage_collapse_test_exit(mm) &&
+			    !userfaultfd_wp(vma))
 				collapse_and_free_pmd(mm, vma, addr, pmd);
 			mmap_write_unlock(mm);
 		} else {
@@ -2020,7 +2021,7 @@  static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
 		}
 
 		node = page_to_nid(page);
-		if (khugepaged_scan_abort(node, cc)) {
+		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			break;
 		}
@@ -2114,7 +2115,7 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 		goto breakouterloop_mmap_lock;
 
 	progress++;
-	if (unlikely(khugepaged_test_exit(mm)))
+	if (unlikely(hpage_collapse_test_exit(mm)))
 		goto breakouterloop;
 
 	address = khugepaged_scan.address;
@@ -2123,7 +2124,7 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 		unsigned long hstart, hend;
 
 		cond_resched();
-		if (unlikely(khugepaged_test_exit(mm))) {
+		if (unlikely(hpage_collapse_test_exit(mm))) {
 			progress++;
 			break;
 		}
@@ -2148,7 +2149,7 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			bool mmap_locked = true;
 
 			cond_resched();
-			if (unlikely(khugepaged_test_exit(mm)))
+			if (unlikely(hpage_collapse_test_exit(mm)))
 				goto breakouterloop;
 
 			/* reset gfp flags since sysfs settings might change */
@@ -2168,9 +2169,10 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 							       cc);
 				fput(file);
 			} else {
-				*result = khugepaged_scan_pmd(mm, vma,
-							      khugepaged_scan.address,
-							      &mmap_locked, cc);
+				*result = hpage_collapse_scan_pmd(mm, vma,
+								  khugepaged_scan.address,
+								  &mmap_locked,
+								  cc);
 			}
 			if (*result == SCAN_SUCCEED)
 				++khugepaged_pages_collapsed;
@@ -2200,7 +2202,7 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	 * Release the current mm_slot if this mm is about to die, or
 	 * if we scanned all vmas of this mm.
 	 */
-	if (khugepaged_test_exit(mm) || !vma) {
+	if (hpage_collapse_test_exit(mm) || !vma) {
 		/*
 		 * Make sure that if mm_users is reaching zero while
 		 * khugepaged runs here, khugepaged_exit will find
@@ -2482,7 +2484,8 @@  int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		}
 		mmap_assert_locked(mm);
 		memset(cc.node_load, 0, sizeof(cc.node_load));
-		result = khugepaged_scan_pmd(mm, vma, addr, &mmap_locked, &cc);
+		result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked,
+						 &cc);
 		if (!mmap_locked)
 			*prev = NULL;  /* Tell caller we dropped mmap_lock */