
mm/khugepaged: Cleanup memcg uncharge for failure path

Message ID 20230303151218.311015-1-peterx@redhat.com (mailing list archive)
State New
Series mm/khugepaged: Cleanup memcg uncharge for failure path

Commit Message

Peter Xu March 3, 2023, 3:12 p.m. UTC
Explicit memcg uncharging is not needed when the memcg accounting has the
same lifespan as the page/folio.  That is now the case for khugepaged
after Yang & Zach's recent rework, since the hpage is allocated afresh
for each collapse rather than being cached.

Clean up the explicit memcg uncharge in the khugepaged failure path and
leave that to put_page().

Suggested-by: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 mm/khugepaged.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
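
The pattern behind the cleanup is simply that the charge is owned by the
page: once the memcg charge and the page share a lifetime, any failure path
only has to drop its reference, and the eventual free of the page releases
the charge with it.  A minimal userspace sketch of that ownership model is
below; the struct and function names (hpage_alloc_charged, hpage_put,
collapse_attempt) are invented for illustration and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace analogy only: the "charge" belongs to the page object, so the
 * final put releases both.  Nothing here is a kernel interface.
 */
struct hpage {
	int refcount;
	int charged;	/* stands in for the memcg charge */
};

static struct hpage *hpage_alloc_charged(void)
{
	struct hpage *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->refcount = 1;
	p->charged = 1;		/* charge taken at allocation time */
	return p;
}

static void hpage_put(struct hpage *p)
{
	if (--p->refcount > 0)
		return;
	/* The free path owns the uncharge: dropping the last reference
	 * releases the charge along with the page. */
	if (p->charged)
		printf("charge released on final put\n");
	free(p);
}

/* Failure path: no explicit uncharge needed, a single put is enough. */
static int collapse_attempt(void)
{
	struct hpage *hpage = hpage_alloc_charged();

	if (!hpage)
		return -1;
	/* ... the collapse fails somewhere in the middle ... */
	hpage_put(hpage);
	return -1;
}

int main(void)
{
	collapse_attempt();
	return 0;
}

Tying the release of the charge to the final put means every error path
collapses to a single put, with no way to uncharge twice or to forget the
uncharge, which is exactly what the patch below leaves to put_page().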

Comments

Zach O'Keefe March 3, 2023, 3:22 p.m. UTC | #1
Thanks Peter!

On Mar 03 10:12, Peter Xu wrote:
> Explicit memcg uncharging is not needed when the memcg accounting has the
> same lifespan as the page/folio.  That is now the case for khugepaged
> after Yang & Zach's recent rework, since the hpage is allocated afresh
> for each collapse rather than being cached.
> 
> Clean up the explicit memcg uncharge in the khugepaged failure path and
> leave that to put_page().
> 
> Suggested-by: Zach O'Keefe <zokeefe@google.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>

Reviewed-by: Zach O'Keefe <zokeefe@google.com>

> ---
>  mm/khugepaged.c | 8 ++------
>  1 file changed, 2 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 941d1c7ea910..dd5a7d9bc593 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1230,10 +1230,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  out_up_write:
>  	mmap_write_unlock(mm);
>  out_nolock:
> -	if (hpage) {
> -		mem_cgroup_uncharge(page_folio(hpage));
> +	if (hpage)
>  		put_page(hpage);
> -	}
>  	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
>  	return result;
>  }
> @@ -2250,10 +2248,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
>  		unlock_page(hpage);
>  out:
>  	VM_BUG_ON(!list_empty(&pagelist));
> -	if (hpage) {
> -		mem_cgroup_uncharge(page_folio(hpage));
> +	if (hpage)
>  		put_page(hpage);
> -	}
>  
>  	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
>  	return result;
> -- 
> 2.39.1
>
Yang Shi March 3, 2023, 7 p.m. UTC | #2
On Fri, Mar 3, 2023 at 7:12 AM Peter Xu <peterx@redhat.com> wrote:
>
> Explicit memcg uncharging is not needed when the memcg accounting has the
> same lifespan as the page/folio.  That is now the case for khugepaged
> after Yang & Zach's recent rework, since the hpage is allocated afresh
> for each collapse rather than being cached.
>
> Clean up the explicit memcg uncharge in the khugepaged failure path and
> leave that to put_page().

Thanks for the cleanup.

Reviewed-by: Yang Shi <shy828301@gmail.com>

>
> Suggested-by: Zach O'Keefe <zokeefe@google.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  mm/khugepaged.c | 8 ++------
>  1 file changed, 2 insertions(+), 6 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 941d1c7ea910..dd5a7d9bc593 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1230,10 +1230,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  out_up_write:
>         mmap_write_unlock(mm);
>  out_nolock:
> -       if (hpage) {
> -               mem_cgroup_uncharge(page_folio(hpage));
> +       if (hpage)
>                 put_page(hpage);
> -       }
>         trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
>         return result;
>  }
> @@ -2250,10 +2248,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
>                 unlock_page(hpage);
>  out:
>         VM_BUG_ON(!list_empty(&pagelist));
> -       if (hpage) {
> -               mem_cgroup_uncharge(page_folio(hpage));
> +       if (hpage)
>                 put_page(hpage);
> -       }
>
>         trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
>         return result;
> --
> 2.39.1
>

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 941d1c7ea910..dd5a7d9bc593 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1230,10 +1230,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 out_up_write:
 	mmap_write_unlock(mm);
 out_nolock:
-	if (hpage) {
-		mem_cgroup_uncharge(page_folio(hpage));
+	if (hpage)
 		put_page(hpage);
-	}
 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
 	return result;
 }
@@ -2250,10 +2248,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		unlock_page(hpage);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	if (hpage) {
-		mem_cgroup_uncharge(page_folio(hpage));
+	if (hpage)
 		put_page(hpage);
-	}
 
 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
 	return result;
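
As the commit message notes, the reason the lifespans now match is that,
since the rework, the hpage is allocated (and charged) fresh for every
collapse attempt instead of being cached across attempts.  A companion
sketch of that alloc-and-charge pairing, again in illustrative userspace C
with invented names rather than the kernel's actual helpers:

#include <stdio.h>
#include <stdlib.h>

struct hpage {
	int refcount;
	int charged;
};

/* Stand-in for the memcg charge; pretend it can fail under pressure. */
static int charge(struct hpage *p)
{
	p->charged = 1;
	return 0;
}

/*
 * Allocate and charge as one step, per attempt.  If the charge fails the
 * page never escapes this function, so callers only ever see a page whose
 * charge already shares its lifetime.
 */
static struct hpage *alloc_charged_hpage(void)
{
	struct hpage *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->refcount = 1;
	if (charge(p)) {
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	struct hpage *p = alloc_charged_hpage();

	printf("%s\n", p ? "page and charge created as one unit"
			 : "allocation failed");
	free(p);
	return 0;
}

With both halves in place, the failure paths in the patch need nothing
beyond put_page().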