@@ -2234,8 +2234,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
/*
* Allocates a fresh surplus page from the page allocator.
*/
-static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nmask)
+static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
+ gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
struct folio *folio = NULL;

@@ -2272,7 +2272,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,

out_unlock:
spin_unlock_irq(&hugetlb_lock);
- return &folio->page;
+ return folio;
}

static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
@@ -2305,7 +2305,7 @@ static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct mempolicy *mpol;
gfp_t gfp_mask = htlb_alloc_mask(h);
int nid;
@@ -2316,16 +2316,16 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
gfp_t gfp = gfp_mask | __GFP_NOWARN;

gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
- page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
+ folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);

- /* Fallback to all nodes if page==NULL */
+ /* Fallback to all nodes if folio==NULL */
nodemask = NULL;
}

- if (!page)
- page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
+ if (!folio)
+ folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
mpol_cond_put(mpol);
- return page;
+ return &folio->page;
}

/* page migration callback function */
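alloc_surplus_hugetlb_folio() now hands back a folio, while alloc_buddy_huge_page_with_mpol() keeps its page-based return type for its remaining callers, so the result is converted on the way out with "return &folio->page;". That conversion also covers the failure path because struct page sits at offset zero inside struct folio, so a NULL folio comes back out as a NULL page pointer to the callers' existing checks. A standalone sketch of that layout assumption, using stand-in structure definitions rather than the real mm_types.h ones:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel structures, kept only to show the offset-zero aliasing. */
struct page { unsigned long flags; };
struct folio { struct page page; };	/* first member here, mirroring its offset-zero placement in the kernel's struct folio */

int main(void)
{
	struct folio f;

	/* A folio and its embedded head page share an address ... */
	assert((void *)&f == (void *)&f.page);
	/* ... so converting between the two is pure pointer reinterpretation. */
	printf("offsetof(struct folio, page) = %zu\n", offsetof(struct folio, page));
	return 0;
}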
@@ -2374,6 +2374,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock)
{
LIST_HEAD(surplus_list);
+ struct folio *folio;
struct page *page, *tmp;
int ret;
long i;
@@ -2393,13 +2394,13 @@ static int gather_surplus_pages(struct hstate *h, long delta)
retry:
spin_unlock_irq(&hugetlb_lock);
for (i = 0; i < needed; i++) {
- page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
+ folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
NUMA_NO_NODE, NULL);
- if (!page) {
+ if (!folio) {
alloc_ok = false;
break;
}
- list_add(&page->lru, &surplus_list);
+ list_add(&folio->lru, &surplus_list);
cond_resched();
}
allocated += i;
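The loop above runs with hugetlb_lock dropped, so the demand it was sized for can change while the surplus folios are being gathered; after retaking the lock, gather_surplus_pages() re-derives needed from the reservation and free counts plus what was just allocated, and jumps back to retry: if it is still short. A minimal sketch of that recompute-and-retry accounting, with plain variables standing in for the struct hstate counters and an invented concurrent reservation bump:

#include <stdio.h>

/* Stand-ins for the struct hstate counters involved in the recheck. */
static long resv_huge_pages = 6;
static long free_huge_pages = 4;

int main(void)
{
	long delta = 2;		/* new reservations being asked for */
	long allocated = 0;	/* surplus folios gathered so far */
	long needed = (resv_huge_pages + delta) - free_huge_pages;
	int pass = 0;

	while (needed > 0) {
		/* "Drop the lock" and allocate the current estimate. */
		allocated += needed;
		printf("pass %d: allocated %ld so far\n", ++pass, allocated);

		/* Simulate a racing reservation arriving while unlocked. */
		if (pass == 1)
			resv_huge_pages += 1;

		/* "Retake the lock" and re-derive the demand, as the retry: path does. */
		needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	}
	printf("done: %ld surplus pages cover the request\n", allocated);
	return 0;
}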
@@ -3352,7 +3353,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
*
- * We might race with alloc_surplus_huge_page() here and be unable
+ * We might race with alloc_surplus_hugetlb_folio() here and be unable
* to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but
@@ -3395,7 +3396,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since
- * alloc_surplus_huge_page() is checking the global counter,
+ * alloc_surplus_hugetlb_folio() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
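Both comments rely on the same global-counter check: alloc_surplus_hugetlb_folio() refuses to account another surplus page once surplus_huge_pages has reached nr_overcommit_huge_pages, so even when shrinking the persistent pool pushes the surplus count past the overcommit limit, the pool cannot keep growing through that path. A toy model of the two counters (ordinary variables and invented numbers, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Ordinary variables modelling the two global counters the comments refer to. */
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages = 2;

/* Mirrors the shape of the headroom check alloc_surplus_hugetlb_folio() applies. */
static bool try_account_surplus(void)
{
	if (surplus_huge_pages >= nr_overcommit_huge_pages)
		return false;	/* no headroom: the allocation is backed out */
	surplus_huge_pages++;
	return true;
}

int main(void)
{
	/* Overcommit allows two surplus pages; both get used. */
	printf("alloc #1: %s\n", try_account_surplus() ? "ok" : "refused");
	printf("alloc #2: %s\n", try_account_surplus() ? "ok" : "refused");

	/*
	 * Shrinking the persistent pool below the number of in-use pages
	 * moves those pages into the surplus state, pushing the count past
	 * the overcommit value ...
	 */
	surplus_huge_pages += 3;

	/* ... after which the global-counter check keeps the pool from growing. */
	printf("alloc #3: %s\n", try_account_surplus() ? "ok" : "refused");
	printf("surplus=%lu overcommit=%lu\n",
	       surplus_huge_pages, nr_overcommit_huge_pages);
	return 0;
}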