
[v2,7/9] mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios

Message ID 20221101223059.460937-8-sidhartha.kumar@oracle.com (mailing list archive)
State New
Series convert hugetlb_cgroup helper functions to folios

Commit Message

Sidhartha Kumar Nov. 1, 2022, 10:30 p.m. UTC
Continue to use a folio inside free_huge_page() by converting
hugetlb_cgroup_uncharge_page*() to folios.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 include/linux/hugetlb_cgroup.h | 16 ++++++++--------
 mm/hugetlb.c                   | 15 +++++++++------
 mm/hugetlb_cgroup.c            | 21 ++++++++++-----------
 3 files changed, 27 insertions(+), 25 deletions(-)
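
For reference, a minimal sketch (not part of the patch) of the caller-side
pattern this conversion moves to; example_uncharge() is a hypothetical name,
while page_folio(), hstate_index(), pages_per_huge_page() and the two
uncharge helpers are the ones the patch itself uses:

/*
 * Hypothetical caller, assuming <linux/hugetlb.h> and
 * <linux/hugetlb_cgroup.h>: derive the folio once from the page and
 * hand it to the folio-taking uncharge helpers, as free_huge_page()
 * does after this patch.
 */
static void example_uncharge(struct hstate *h, struct page *page)
{
	struct folio *folio = page_folio(page);

	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				      pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					   pages_per_huge_page(h), folio);
}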

Comments

Muchun Song Nov. 2, 2022, 6:56 a.m. UTC | #1
> On Nov 2, 2022, at 06:30, Sidhartha Kumar <sidhartha.kumar@oracle.com> wrote:
> 
> Continue to use a folio inside free_huge_page() by converting
> hugetlb_cgroup_uncharge_page*() to folios.
> 
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>


Reviewed-by: Muchun Song <songmuchun@bytedance.com>

A nit below.

> [...]
> @@ -2918,6 +2919,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
> * a reservation exists for the allocation.
> */
> page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
> +

Redundant blank line.

> if (!page) {
> spin_unlock_irq(&hugetlb_lock);
> page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
> [...]

Patch

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 789b6fef176d..c70f92fe493e 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -158,10 +158,10 @@  extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 					      struct hugetlb_cgroup *h_cg,
 					      struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					 struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-					      struct page *page);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					 struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					      struct folio *folio);
 
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
 					   struct hugetlb_cgroup *h_cg);
@@ -254,14 +254,14 @@  hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-						struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+						struct folio *folio)
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
 						     unsigned long nr_pages,
-						     struct page *page)
+						     struct folio *folio)
 {
 }
 static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 387b8d74107d..2ecc0a6cf883 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1726,10 +1726,10 @@  void free_huge_page(struct page *page)
 
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	folio_clear_hugetlb_migratable(folio);
-	hugetlb_cgroup_uncharge_page(hstate_index(h),
-				     pages_per_huge_page(h), page);
-	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					  pages_per_huge_page(h), page);
+	hugetlb_cgroup_uncharge_folio(hstate_index(h),
+				     pages_per_huge_page(h), folio);
+	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					  pages_per_huge_page(h), folio);
 	if (restore_reserve)
 		h->resv_huge_pages++;
 
@@ -2855,6 +2855,7 @@  struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
+	struct folio *folio;
 	long map_chg, map_commit;
 	long gbl_chg;
 	int ret, idx;
@@ -2918,6 +2919,7 @@  struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * a reservation exists for the allocation.
 	 */
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
+
 	if (!page) {
 		spin_unlock_irq(&hugetlb_lock);
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
@@ -2932,6 +2934,7 @@  struct page *alloc_huge_page(struct vm_area_struct *vma,
 		set_page_refcounted(page);
 		/* Fall through */
 	}
+	folio = page_folio(page);
 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
@@ -2961,8 +2964,8 @@  struct page *alloc_huge_page(struct vm_area_struct *vma,
 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
 		hugetlb_acct_memory(h, -rsv_adjust);
 		if (deferred_reserve)
-			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					pages_per_huge_page(h), page);
+			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					pages_per_huge_page(h), folio);
 	}
 	return page;
 
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 351ffb40261c..7793401acc12 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -349,11 +349,10 @@  void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 /*
  * Should be called with hugetlb_lock held
  */
-static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					   struct page *page, bool rsvd)
+static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					   struct folio *folio, bool rsvd)
 {
 	struct hugetlb_cgroup *h_cg;
-	struct folio *folio = page_folio(page);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -371,27 +370,27 @@  static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 		css_put(&h_cg->css);
 	else {
 		unsigned long usage =
-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
 		/*
 		 * This write is not atomic due to fetching usage and writing
 		 * to it, but that's fine because we call this with
 		 * hugetlb_lock held anyway.
 		 */
-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
 			   usage - nr_pages);
 	}
 }
 
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-				  struct page *page)
+void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+				  struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
 }
 
-void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-				       struct page *page)
+void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+				       struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
 }
 
 static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
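
As a side note on the node accounting hunk above, a standalone sketch (not
from the patch; example_node_usage_sub() is a hypothetical name) of the
read-modify-write that __hugetlb_cgroup_uncharge_folio() performs: the load
and the WRITE_ONCE() are not one atomic step, which is safe only because
callers hold hugetlb_lock, and folio_nid() now supplies the node id without
going through struct page.

/* Hypothetical standalone form of the per-node usage update. */
static void example_node_usage_sub(struct hugetlb_cgroup *h_cg, int idx,
				   struct folio *folio, unsigned long nr_pages)
{
	int nid = folio_nid(folio);
	unsigned long usage = h_cg->nodeinfo[nid]->usage[idx];

	/* Non-atomic read-modify-write; callers hold hugetlb_lock. */
	WRITE_ONCE(h_cg->nodeinfo[nid]->usage[idx], usage - nr_pages);
}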