
[mm-unstable,6/8] mm/hugetlb: convert alloc_migrate_huge_page to folios

Message ID: 20230103191340.116536-7-sidhartha.kumar@oracle.com
State: New
Series: continue hugetlb folio conversions

Commit Message

Sidhartha Kumar Jan. 3, 2023, 7:13 p.m. UTC
Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio(). Both functions
now return a folio rather than a page.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 include/linux/hugetlb.h |  6 +++---
 mm/hugetlb.c            | 18 +++++++++---------
 mm/migrate.c            |  5 ++++-
 3 files changed, 16 insertions(+), 13 deletions(-)
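
A caller-side sketch of the conversion pattern (a self-contained user-space
mock with hypothetical stand-in types and names, not the kernel API): the
folio-returning allocator lets a caller that still exposes a page-based
interface convert at the boundary with &folio->page.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; in the kernel, struct folio likewise begins
 * with an embedded struct page. */
struct page { unsigned long flags; };
struct folio { struct page page; };

/* Mock allocator following the renamed function's shape: it returns a
 * folio, or NULL on failure. */
static struct folio *mock_alloc_hugetlb_folio(void)
{
	return calloc(1, sizeof(struct folio));
}

/* A page-based caller converts at the boundary, as alloc_huge_page_vma()
 * does in the patch below; a NULL folio simply yields a NULL page. */
static struct page *mock_page_caller(void)
{
	struct folio *folio = mock_alloc_hugetlb_folio();

	return &folio->page;
}

int main(void)
{
	printf("page at %p\n", (void *)mock_page_caller());
	return 0;
}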

Comments

Mike Kravetz Jan. 7, 2023, 12:54 a.m. UTC | #1
On 01/03/23 13:13, Sidhartha Kumar wrote:
> Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
> alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio(). Both functions
> now return a folio rather than a page.

>  /* mempolicy aware migration callback */
> @@ -2357,16 +2357,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
>  {
>  	struct mempolicy *mpol;
>  	nodemask_t *nodemask;
> -	struct page *page;
> +	struct folio *folio;
>  	gfp_t gfp_mask;
>  	int node;
>  
>  	gfp_mask = htlb_alloc_mask(h);
>  	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
> -	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
> +	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
>  	mpol_cond_put(mpol);
>  
> -	return page;
> +	return &folio->page;

Is it possible that folio could be NULL here and cause an addressing exception?

> diff --git a/mm/migrate.c b/mm/migrate.c
> index 6932b3d5a9dd..fab706b78be1 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1622,6 +1622,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
>  	struct migration_target_control *mtc;
>  	gfp_t gfp_mask;
>  	unsigned int order = 0;
> +	struct folio *hugetlb_folio = NULL;
>  	struct folio *new_folio = NULL;
>  	int nid;
>  	int zidx;
> @@ -1636,7 +1637,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
>  		struct hstate *h = folio_hstate(folio);
>  
>  		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
> -		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
> +		hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
> +						mtc->nmask, gfp_mask);
> +		return &hugetlb_folio->page;

and, here as well?
Sidhartha Kumar Jan. 9, 2023, 4:26 p.m. UTC | #2
On 1/6/23 6:54 PM, Mike Kravetz wrote:
> On 01/03/23 13:13, Sidhartha Kumar wrote:
>> Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
>> alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio(). Both functions
>> now return a folio rather than a page.
> 
>>   /* mempolicy aware migration callback */
>> @@ -2357,16 +2357,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
>>   {
>>   	struct mempolicy *mpol;
>>   	nodemask_t *nodemask;
>> -	struct page *page;
>> +	struct folio *folio;
>>   	gfp_t gfp_mask;
>>   	int node;
>>   
>>   	gfp_mask = htlb_alloc_mask(h);
>>   	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
>> -	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
>> +	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
>>   	mpol_cond_put(mpol);
>>   
>> -	return page;
>> +	return &folio->page;
> 
> Is it possible that folio could be NULL here and cause an addressing exception?
> 
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index 6932b3d5a9dd..fab706b78be1 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -1622,6 +1622,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
>>   	struct migration_target_control *mtc;
>>   	gfp_t gfp_mask;
>>   	unsigned int order = 0;
>> +	struct folio *hugetlb_folio = NULL;
>>   	struct folio *new_folio = NULL;
>>   	int nid;
>>   	int zidx;
>> @@ -1636,7 +1637,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
>>   		struct hstate *h = folio_hstate(folio);
>>   
>>   		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
>> -		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
>> +		hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
>> +						mtc->nmask, gfp_mask);
>> +		return &hugetlb_folio->page;
> 
> and, here as well?

Hi Mike,

It is possible that the folio could be NULL, but I believe these
instances would not cause an addressing exception: as described in [1],
&folio->page is safe even when the folio is NULL because the page
member sits at offset 0 within struct folio.


[1] https://lore.kernel.org/lkml/Y7h4jsv6jl0XSIsk@casper.infradead.org/T/
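
To make the offset argument concrete, here is a minimal user-space sketch
(hypothetical stand-in types, not the kernel definitions). Because page is
the first member of struct folio, &folio->page is pointer arithmetic at
offset 0 and performs no load. Strictly by the letter of the C standard,
forming the address through a NULL pointer is undefined behaviour; the
kernel relies on GCC/Clang compiling it as plain offset arithmetic.

#include <assert.h>
#include <stddef.h>

struct page { unsigned long flags; };
struct folio { struct page page; };	/* page is the first member */

int main(void)
{
	struct folio *folio = NULL;

	/* page sits at offset 0 within struct folio. */
	assert(offsetof(struct folio, page) == 0);

	/* &folio->page only adds that offset to the pointer; nothing is
	 * dereferenced, so a NULL folio yields a NULL page pointer. */
	assert(&folio->page == (struct page *)NULL);
	return 0;
}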
Mike Kravetz Jan. 9, 2023, 6:21 p.m. UTC | #3
On 01/09/23 10:26, Sidhartha Kumar wrote:
> On 1/6/23 6:54 PM, Mike Kravetz wrote:
> > On 01/03/23 13:13, Sidhartha Kumar wrote:
> > > Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
> > > alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio(). Both functions
> > > now return a folio rather than a page.
> > 
> > >   /* mempolicy aware migration callback */
> > > @@ -2357,16 +2357,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
> > >   {
> > >   	struct mempolicy *mpol;
> > >   	nodemask_t *nodemask;
> > > -	struct page *page;
> > > +	struct folio *folio;
> > >   	gfp_t gfp_mask;
> > >   	int node;
> > >   	gfp_mask = htlb_alloc_mask(h);
> > >   	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
> > > -	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
> > > +	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
> > >   	mpol_cond_put(mpol);
> > > -	return page;
> > > +	return &folio->page;
> > 
> > Is it possible that folio could be NULL here and cause an addressing exception?
> > 
> > > diff --git a/mm/migrate.c b/mm/migrate.c
> > > index 6932b3d5a9dd..fab706b78be1 100644
> > > --- a/mm/migrate.c
> > > +++ b/mm/migrate.c
> > > @@ -1622,6 +1622,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
> > >   	struct migration_target_control *mtc;
> > >   	gfp_t gfp_mask;
> > >   	unsigned int order = 0;
> > > +	struct folio *hugetlb_folio = NULL;
> > >   	struct folio *new_folio = NULL;
> > >   	int nid;
> > >   	int zidx;
> > > @@ -1636,7 +1637,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
> > >   		struct hstate *h = folio_hstate(folio);
> > >   		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
> > > -		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
> > > +		hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
> > > +						mtc->nmask, gfp_mask);
> > > +		return &hugetlb_folio->page;
> > 
> > and, here as well?
> 
> Hi Mike,
> 
> It is possible that the folio could be NULL, but I believe these instances
> would not cause an addressing exception: as described in [1], &folio->page
> is safe even when the folio is NULL because the page member sits at offset 0
> within struct folio.
> 
> 
> [1] https://lore.kernel.org/lkml/Y7h4jsv6jl0XSIsk@casper.infradead.org/T/

Thanks!
I did not follow that thread and did not look closely at whether
&folio->page is safe when folio == NULL.

I must say that it is going to take me some time to not pause and think
when coming upon &folio->page.  Perhaps that is good.

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 482929b2d044..a853c13d8308 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -680,7 +680,7 @@  struct huge_bootmem_page {
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask, gfp_t gfp_mask);
 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address);
@@ -1001,8 +1001,8 @@  static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
 	return NULL;
 }
 
-static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 			nodemask_t *nmask, gfp_t gfp_mask)
 {
 	return NULL;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 640ca4eaccf2..0db01718d1c3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2278,7 +2278,7 @@  static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 	return folio;
 }
 
-static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
 				     int nid, nodemask_t *nmask)
 {
 	struct folio *folio;
@@ -2298,7 +2298,7 @@  static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
 	 */
 	folio_set_hugetlb_temporary(folio);
 
-	return &folio->page;
+	return folio;
 }
 
 /*
@@ -2331,8 +2331,8 @@  struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 	return folio;
 }
 
-/* page migration callback function */
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+/* folio migration callback function */
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 		nodemask_t *nmask, gfp_t gfp_mask)
 {
 	spin_lock_irq(&hugetlb_lock);
@@ -2343,12 +2343,12 @@  struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 						preferred_nid, nmask);
 		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return &folio->page;
+			return folio;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);
 
-	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
+	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
 }
 
 /* mempolicy aware migration callback */
@@ -2357,16 +2357,16 @@  struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 {
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
-	struct page *page;
+	struct folio *folio;
 	gfp_t gfp_mask;
 	int node;
 
 	gfp_mask = htlb_alloc_mask(h);
 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
-	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
+	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
 	mpol_cond_put(mpol);
 
-	return page;
+	return &folio->page;
 }
 
 /*
diff --git a/mm/migrate.c b/mm/migrate.c
index 6932b3d5a9dd..fab706b78be1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1622,6 +1622,7 @@  struct page *alloc_migration_target(struct page *page, unsigned long private)
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
+	struct folio *hugetlb_folio = NULL;
 	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
@@ -1636,7 +1637,9 @@  struct page *alloc_migration_target(struct page *page, unsigned long private)
 		struct hstate *h = folio_hstate(folio);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
+		hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
+						mtc->nmask, gfp_mask);
+		return &hugetlb_folio->page;
 	}
 
 	if (folio_test_large(folio)) {