[v6,04/15] mm/khugepaged: dedup and simplify hugepage alloc and charging

Message ID 20220604004004.954674-5-zokeefe@google.com (mailing list archive)
State New
Series mm: userspace hugepage collapse

Commit Message

Zach O'Keefe June 4, 2022, 12:39 a.m. UTC
The following code is duplicated in collapse_huge_page() and
collapse_file():

        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

        new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
                goto out;
        }

        if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
                result = SCAN_CGROUP_CHARGE_FAIL;
                goto out;
        }
        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

Also, "node" is passed as an argument to both collapse_huge_page() and
collapse_file() and obtained the same way, via
khugepaged_find_target_node().

Move all this into a new helper, alloc_charge_hpage(), and remove the
duplicate code from collapse_huge_page() and collapse_file().  Also,
simplify khugepaged_alloc_page() by returning a bool indicating
allocation success instead of a copy of the allocated struct page.

Suggested-by: Peter Xu <peterx@redhat.com>

---

Signed-off-by: Zach O'Keefe <zokeefe@google.com>
---
 mm/khugepaged.c | 77 ++++++++++++++++++++++---------------------------
 1 file changed, 34 insertions(+), 43 deletions(-)
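
With the helper in place, both call sites collapse to the same pattern (a
sketch distilled from the hunks in the patch below; the error label is
out_nolock in collapse_huge_page() and out in collapse_file()):

        result = alloc_charge_hpage(hpage, mm, cc);
        if (result != SCAN_SUCCEED)
                goto out;       /* out_nolock in collapse_huge_page() */
        new_page = *hpage;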

Comments

Yang Shi June 6, 2022, 8:50 p.m. UTC | #1
On Fri, Jun 3, 2022 at 5:40 PM Zach O'Keefe <zokeefe@google.com> wrote:
>
> The following code is duplicated in collapse_huge_page() and
> collapse_file():
>
>         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
>
>         new_page = khugepaged_alloc_page(hpage, gfp, node);
>         if (!new_page) {
>                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
>                 goto out;
>         }
>
>         if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
>                 result = SCAN_CGROUP_CHARGE_FAIL;
>                 goto out;
>         }
>         count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
>
> Also, "node" is passed as an argument to both collapse_huge_page() and
> collapse_file() and obtained the same way, via
> khugepaged_find_target_node().
>
> Move all this into a new helper, alloc_charge_hpage(), and remove the
> duplicate code from collapse_huge_page() and collapse_file().  Also,
> simplify khugepaged_alloc_page() by returning a bool indicating
> allocation success instead of a copy of the allocated struct page.
>
> Suggested-by: Peter Xu <peterx@redhat.com>
>
> ---
>
> Signed-off-by: Zach O'Keefe <zokeefe@google.com>

Reviewed-by: Yang Shi <shy828301@gmail.com>

> [...]

Peter Xu June 29, 2022, 9:58 p.m. UTC | #2
On Fri, Jun 03, 2022 at 05:39:53PM -0700, Zach O'Keefe wrote:
> [...]
> Suggested-by: Peter Xu <peterx@redhat.com>
> 
> ---

[note: please remember to drop this "---" when you repost, since I think it
 could drop your sign-off when the patch is applied]

> 
> Signed-off-by: Zach O'Keefe <zokeefe@google.com>

Reviewed-by: Peter Xu <peterx@redhat.com>

Thanks,
Zach O'Keefe June 30, 2022, 8:14 p.m. UTC | #3
On Jun 29 17:58, Peter Xu wrote:
> On Fri, Jun 03, 2022 at 05:39:53PM -0700, Zach O'Keefe wrote:
> > [...]
> > ---
> 
> [note: please remember to drop this "---" when you repost, since I think it
>  could drop your sign-off when the patch is applied]
>

Thanks for catching this, Peter! Fixed locally!
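
I.e., the tags now sit together above a single "---" (sketch of the
expected trailer layout, so the sign-off isn't dropped when the patch is
applied):

        Suggested-by: Peter Xu <peterx@redhat.com>
        Signed-off-by: Zach O'Keefe <zokeefe@google.com>
        ---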

Best,
Zach

> > 
> > Signed-off-by: Zach O'Keefe <zokeefe@google.com>
> 
> Reviewed-by: Peter Xu <peterx@redhat.com>

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 907d0b2bd4bd..38488d114073 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -860,19 +860,18 @@ static bool alloc_fail_should_sleep(struct page **hpage, bool *wait)
 	return false;
 }
 
-static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
+static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
-		return NULL;
+		return false;
 	}
 
 	prep_transhuge_page(*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
-	return *hpage;
+	return true;
 }
 
 /*
@@ -995,10 +994,23 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 	return true;
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-				   unsigned long address,
-				   struct page **hpage,
-				   int node, int referenced, int unmapped)
+static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+			      struct collapse_control *cc)
+{
+	gfp_t gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
+	int node = khugepaged_find_target_node(cc);
+
+	if (!khugepaged_alloc_page(hpage, gfp, node))
+		return SCAN_ALLOC_HUGE_PAGE_FAIL;
+	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
+		return SCAN_CGROUP_CHARGE_FAIL;
+	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+	return SCAN_SUCCEED;
+}
+
+static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
+			       struct page **hpage, int referenced,
+			       int unmapped, struct collapse_control *cc)
 {
 	LIST_HEAD(compound_pagelist);
 	pmd_t *pmd, _pmd;
@@ -1009,13 +1021,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 	int isolated = 0, result = 0;
 	struct vm_area_struct *vma;
 	struct mmu_notifier_range range;
-	gfp_t gfp;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
-	/* Only allocate from the target node */
-	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-
 	/*
 	 * Before allocating the hugepage, release the mmap_lock read lock.
 	 * The allocation can take potentially a long time if it involves
@@ -1023,17 +1031,12 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * that. We will recheck the vma after taking it again in write mode.
 	 */
 	mmap_read_unlock(mm);
-	new_page = khugepaged_alloc_page(hpage, gfp, node);
-	if (!new_page) {
-		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
-		goto out_nolock;
-	}
 
-	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
-		result = SCAN_CGROUP_CHARGE_FAIL;
+	result = alloc_charge_hpage(hpage, mm, cc);
+	if (result != SCAN_SUCCEED)
 		goto out_nolock;
-	}
-	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
+
+	new_page = *hpage;
 
 	mmap_read_lock(mm);
 	result = hugepage_vma_revalidate(mm, address, &vma);
@@ -1306,10 +1309,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
 	if (ret) {
-		node = khugepaged_find_target_node(cc);
 		/* collapse_huge_page will return with the mmap_lock released */
-		collapse_huge_page(mm, address, hpage, node,
-				referenced, unmapped);
+		collapse_huge_page(mm, address, hpage, referenced, unmapped,
+				   cc);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
@@ -1578,7 +1580,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  * @file: file that collapse on
  * @start: collapse start address
  * @hpage: new allocated huge page for collapse
- * @node: appointed node the new huge page allocate from
+ * @cc: collapse context and scratchpad
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and lock a new huge page;
@@ -1595,12 +1597,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + restore gaps in the page cache;
  *    + unlock and free huge page;
  */
-static void collapse_file(struct mm_struct *mm,
-		struct file *file, pgoff_t start,
-		struct page **hpage, int node)
+static void collapse_file(struct mm_struct *mm, struct file *file,
+			  pgoff_t start, struct page **hpage,
+			  struct collapse_control *cc)
 {
 	struct address_space *mapping = file->f_mapping;
-	gfp_t gfp;
 	struct page *new_page;
 	pgoff_t index, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
@@ -1612,20 +1613,11 @@ static void collapse_file(struct mm_struct *mm,
 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
-	/* Only allocate from the target node */
-	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-
-	new_page = khugepaged_alloc_page(hpage, gfp, node);
-	if (!new_page) {
-		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
+	result = alloc_charge_hpage(hpage, mm, cc);
+	if (result != SCAN_SUCCEED)
 		goto out;
-	}
 
-	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
-		result = SCAN_CGROUP_CHARGE_FAIL;
-		goto out;
-	}
-	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
+	new_page = *hpage;
 
 	/*
 	 * Ensure we have slots for all the pages in the range.  This is
@@ -2037,8 +2029,7 @@ static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
 			result = SCAN_EXCEED_NONE_PTE;
 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
 		} else {
-			node = khugepaged_find_target_node(cc);
-			collapse_file(mm, file, start, hpage, node);
+			collapse_file(mm, file, start, hpage, cc);
 		}
 	}