[3/9] mm/hugetlb_cgroup: convert set_hugetlb_cgroup*() to folios

Message ID: 20221014031303.231740-4-sidhartha.kumar@oracle.com
State: New
Series: convert hugetlb_cgroup helper functions to folios

Commit Message

Sidhartha Kumar Oct. 14, 2022, 3:12 a.m. UTC
Allows __prep_new_huge_page() to operate on a folio by converting
set_hugetlb_cgroup*() to take in a folio.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 include/linux/hugetlb_cgroup.h | 12 ++++++------
 mm/hugetlb.c                   | 33 +++++++++++++++++++--------------
 mm/hugetlb_cgroup.c            | 11 ++++++-----
 3 files changed, 31 insertions(+), 25 deletions(-)

Comments

Mike Kravetz Oct. 31, 2022, 4:38 p.m. UTC | #1
On 10/13/22 20:12, Sidhartha Kumar wrote:
> Allows __prep_new_huge_page() to operate on a folio by converting
> set_hugetlb_cgroup*() to take in a folio.
> 
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1758,19 +1758,21 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
>  	h->nr_huge_pages_node[nid]++;
>  }
>  
> -static void __prep_new_huge_page(struct hstate *h, struct page *page)
> +static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
>  {
> -	hugetlb_vmemmap_optimize(h, page);
> -	INIT_LIST_HEAD(&page->lru);
> -	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
> -	hugetlb_set_page_subpool(page, NULL);
> -	set_hugetlb_cgroup(page, NULL);
> -	set_hugetlb_cgroup_rsvd(page, NULL);
> +	hugetlb_vmemmap_optimize(h, &folio->page);
> +	INIT_LIST_HEAD(&folio->lru);
> +	folio->_folio_dtor = HUGETLB_PAGE_DTOR;

Seems like we should have a routine 'set_folio_dtor' that has the same
functionality as set_compound_page_dtor.  Here, we lose the check for a
valid DTOR value (although not terribly valuable).

Not required for this patch, but something to note.
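
For reference, a rough sketch of what such a helper might look like (the
name and exact form are just a suggestion, mirroring set_compound_page_dtor()):

	static inline void folio_set_compound_dtor(struct folio *folio,
			enum compound_dtor_id compound_dtor)
	{
		/*
		 * Keep the destructor-range check that the open-coded
		 * assignment drops.
		 */
		VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
		folio->_folio_dtor = compound_dtor;
	}

The assignment above would then become
folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR).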

> +	hugetlb_set_folio_subpool(folio, NULL);
> +	set_hugetlb_cgroup(folio, NULL);
> +	set_hugetlb_cgroup_rsvd(folio, NULL);
>  }
>  
>  static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
>  {
> -	__prep_new_huge_page(h, page);
> +	struct folio *folio = page_folio(page);
> +
> +	__prep_new_hugetlb_folio(h, folio);
>  	spin_lock_irq(&hugetlb_lock);
>  	__prep_account_new_huge_page(h, nid);
>  	spin_unlock_irq(&hugetlb_lock);
> @@ -2731,8 +2733,10 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>  					struct list_head *list)
>  {
>  	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
> -	int nid = page_to_nid(old_page);
> +	struct folio *old_folio = page_folio(old_page);
> +	int nid = folio_nid(old_folio);
>  	struct page *new_page;
> +	struct folio *new_folio;
>  	int ret = 0;
>  
>  	/*
> @@ -2745,16 +2749,17 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>  	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
>  	if (!new_page)
>  		return -ENOMEM;
> -	__prep_new_huge_page(h, new_page);
> +	new_folio = page_folio(new_page);
> +	__prep_new_hugetlb_folio(h, new_folio);
>  
>  retry:
>  	spin_lock_irq(&hugetlb_lock);
> -	if (!PageHuge(old_page)) {
> +	if (!folio_test_hugetlb(old_folio)) {
>  		/*
>  		 * Freed from under us. Drop new_page too.
>  		 */
>  		goto free_new;
> -	} else if (page_count(old_page)) {
> +	} else if (folio_ref_count(old_folio)) {
>  		/*
>  		 * Someone has grabbed the page, try to isolate it here.
>  		 * Fail with -EBUSY if not possible.
> @@ -2763,7 +2768,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>  		ret = isolate_hugetlb(old_page, list);
>  		spin_lock_irq(&hugetlb_lock);
>  		goto free_new;
> -	} else if (!HPageFreed(old_page)) {
> +	} else if (!folio_test_hugetlb(old_folio)) {

Should that be?
	} else if (!folio_test_hugetlb_freed(old_folio)) {

As written, the condition repeats the !folio_test_hugetlb() check from the
first branch, so this else-if can never be true.

Sidhartha Kumar Nov. 1, 2022, 4:43 p.m. UTC | #2
On 10/31/22 9:38 AM, Mike Kravetz wrote:
> On 10/13/22 20:12, Sidhartha Kumar wrote:
>> Allows __prep_new_huge_page() to operate on a folio by converting
>> set_hugetlb_cgroup*() to take in a folio.
>>
>> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
>> --- a/mm/hugetlb.c
>> +++ b/mm/hugetlb.c
>> @@ -1758,19 +1758,21 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
>>   	h->nr_huge_pages_node[nid]++;
>>   }
>>   
>> -static void __prep_new_huge_page(struct hstate *h, struct page *page)
>> +static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
>>   {
>> -	hugetlb_vmemmap_optimize(h, page);
>> -	INIT_LIST_HEAD(&page->lru);
>> -	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
>> -	hugetlb_set_page_subpool(page, NULL);
>> -	set_hugetlb_cgroup(page, NULL);
>> -	set_hugetlb_cgroup_rsvd(page, NULL);
>> +	hugetlb_vmemmap_optimize(h, &folio->page);
>> +	INIT_LIST_HEAD(&folio->lru);
>> +	folio->_folio_dtor = HUGETLB_PAGE_DTOR;
> Seems like we should have a routine 'set_folio_dtor' that has the same
> functionality as set_compound_page_dtor.  Here, we lose the check for a
> valid DTOR value (although not terribly valuable).

I agree with the need for a 'set_folio_dtor' routine; I'll send out a
patch for that as well.

> Not required for this patch, but something to note.
>
>> +	hugetlb_set_folio_subpool(folio, NULL);
>> +	set_hugetlb_cgroup(folio, NULL);
>> +	set_hugetlb_cgroup_rsvd(folio, NULL);
>>   }
>>   
>>   static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
>>   {
>> -	__prep_new_huge_page(h, page);
>> +	struct folio *folio = page_folio(page);
>> +
>> +	__prep_new_hugetlb_folio(h, folio);
>>   	spin_lock_irq(&hugetlb_lock);
>>   	__prep_account_new_huge_page(h, nid);
>>   	spin_unlock_irq(&hugetlb_lock);
>> @@ -2731,8 +2733,10 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>>   					struct list_head *list)
>>   {
>>   	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
>> -	int nid = page_to_nid(old_page);
>> +	struct folio *old_folio = page_folio(old_page);
>> +	int nid = folio_nid(old_folio);
>>   	struct page *new_page;
>> +	struct folio *new_folio;
>>   	int ret = 0;
>>   
>>   	/*
>> @@ -2745,16 +2749,17 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>>   	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
>>   	if (!new_page)
>>   		return -ENOMEM;
>> -	__prep_new_huge_page(h, new_page);
>> +	new_folio = page_folio(new_page);
>> +	__prep_new_hugetlb_folio(h, new_folio);
>>   
>>   retry:
>>   	spin_lock_irq(&hugetlb_lock);
>> -	if (!PageHuge(old_page)) {
>> +	if (!folio_test_hugetlb(old_folio)) {
>>   		/*
>>   		 * Freed from under us. Drop new_page too.
>>   		 */
>>   		goto free_new;
>> -	} else if (page_count(old_page)) {
>> +	} else if (folio_ref_count(old_folio)) {
>>   		/*
>>   		 * Someone has grabbed the page, try to isolate it here.
>>   		 * Fail with -EBUSY if not possible.
>> @@ -2763,7 +2768,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
>>   		ret = isolate_hugetlb(old_page, list);
>>   		spin_lock_irq(&hugetlb_lock);
>>   		goto free_new;
>> -	} else if (!HPageFreed(old_page)) {
>> +	} else if (!folio_test_hugetlb(old_folio)) {
> Should that be?
> 	} else if (!folio_test_hugetlb_freed(old_folio)) {

Yes good catch, I will fix in a v2.
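
For v2, the resulting hunk would look something like:

-	} else if (!HPageFreed(old_page)) {
+	} else if (!folio_test_hugetlb_freed(old_folio)) {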

Thanks,
Sidhartha Kumar

Patch

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index feb2edafc8b6..a7e3540f7f38 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -112,16 +112,16 @@ static inline void __set_hugetlb_cgroup(struct folio *folio,
 				 (unsigned long)h_cg);
 }
 
-static inline void set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
 				     struct hugetlb_cgroup *h_cg)
 {
-	__set_hugetlb_cgroup(page_folio(page), h_cg, false);
+	__set_hugetlb_cgroup(folio, h_cg, false);
 }
 
-static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
 					  struct hugetlb_cgroup *h_cg)
 {
-	__set_hugetlb_cgroup(page_folio(page), h_cg, true);
+	__set_hugetlb_cgroup(folio, h_cg, true);
 }
 
 static inline bool hugetlb_cgroup_disabled(void)
@@ -199,12 +199,12 @@ hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
 	return NULL;
 }
 
-static inline void set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
 				     struct hugetlb_cgroup *h_cg)
 {
 }
 
-static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
 					  struct hugetlb_cgroup *h_cg)
 {
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bcb9bfce32ee..4d98bf7ba81c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1758,19 +1758,21 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
 	h->nr_huge_pages_node[nid]++;
 }
 
-static void __prep_new_huge_page(struct hstate *h, struct page *page)
+static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
-	hugetlb_vmemmap_optimize(h, page);
-	INIT_LIST_HEAD(&page->lru);
-	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
-	hugetlb_set_page_subpool(page, NULL);
-	set_hugetlb_cgroup(page, NULL);
-	set_hugetlb_cgroup_rsvd(page, NULL);
+	hugetlb_vmemmap_optimize(h, &folio->page);
+	INIT_LIST_HEAD(&folio->lru);
+	folio->_folio_dtor = HUGETLB_PAGE_DTOR;
+	hugetlb_set_folio_subpool(folio, NULL);
+	set_hugetlb_cgroup(folio, NULL);
+	set_hugetlb_cgroup_rsvd(folio, NULL);
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
-	__prep_new_huge_page(h, page);
+	struct folio *folio = page_folio(page);
+
+	__prep_new_hugetlb_folio(h, folio);
 	spin_lock_irq(&hugetlb_lock);
 	__prep_account_new_huge_page(h, nid);
 	spin_unlock_irq(&hugetlb_lock);
@@ -2731,8 +2733,10 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 					struct list_head *list)
 {
 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
-	int nid = page_to_nid(old_page);
+	struct folio *old_folio = page_folio(old_page);
+	int nid = folio_nid(old_folio);
 	struct page *new_page;
+	struct folio *new_folio;
 	int ret = 0;
 
 	/*
@@ -2745,16 +2749,17 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
 	if (!new_page)
 		return -ENOMEM;
-	__prep_new_huge_page(h, new_page);
+	new_folio = page_folio(new_page);
+	__prep_new_hugetlb_folio(h, new_folio);
 
 retry:
 	spin_lock_irq(&hugetlb_lock);
-	if (!PageHuge(old_page)) {
+	if (!folio_test_hugetlb(old_folio)) {
 		/*
 		 * Freed from under us. Drop new_page too.
 		 */
 		goto free_new;
-	} else if (page_count(old_page)) {
+	} else if (folio_ref_count(old_folio)) {
 		/*
 		 * Someone has grabbed the page, try to isolate it here.
 		 * Fail with -EBUSY if not possible.
@@ -2763,7 +2768,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		ret = isolate_hugetlb(old_page, list);
 		spin_lock_irq(&hugetlb_lock);
 		goto free_new;
-	} else if (!HPageFreed(old_page)) {
+	} else if (!folio_test_hugetlb(old_folio)) {
 		/*
 		 * Page's refcount is 0 but it has not been enqueued in the
 		 * freelist yet. Race window is small, so we can succeed here if
@@ -2801,7 +2806,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 free_new:
 	spin_unlock_irq(&hugetlb_lock);
 	/* Page has a zero ref count, but needs a ref to be freed */
-	set_page_refcounted(new_page);
+	folio_ref_unfreeze(new_folio, 1);
 	update_and_free_page(h, new_page, false);
 
 	return ret;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 600c98560a0f..692b23b5d423 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -212,7 +212,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
 	/* Take the pages off the local counter */
 	page_counter_cancel(counter, nr_pages);
 
-	set_hugetlb_cgroup(page, parent);
+	set_hugetlb_cgroup(folio, parent);
 out:
 	return;
 }
@@ -894,6 +894,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	struct hugetlb_cgroup *h_cg_rsvd;
 	struct hstate *h = page_hstate(oldhpage);
 	struct folio *old_folio = page_folio(oldhpage);
+	struct folio *new_folio = page_folio(newhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -901,12 +902,12 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	spin_lock_irq(&hugetlb_lock);
 	h_cg = hugetlb_cgroup_from_folio(old_folio);
 	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
-	set_hugetlb_cgroup(oldhpage, NULL);
-	set_hugetlb_cgroup_rsvd(oldhpage, NULL);
+	set_hugetlb_cgroup(old_folio, NULL);
+	set_hugetlb_cgroup_rsvd(old_folio, NULL);
 
 	/* move the h_cg details to new cgroup */
-	set_hugetlb_cgroup(newhpage, h_cg);
-	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
+	set_hugetlb_cgroup(new_folio, h_cg);
+	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
 	list_move(&newhpage->lru, &h->hugepage_activelist);
 	spin_unlock_irq(&hugetlb_lock);
 	return;