diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1543,17 +1543,17 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
 	return &folio->page;
 }
 
-static struct page *shmem_alloc_page(gfp_t gfp,
+static struct folio *shmem_alloc_folio(gfp_t gfp,
 			struct shmem_inode_info *info, pgoff_t index)
 {
 	struct vm_area_struct pvma;
-	struct page *page;
+	struct folio *folio;
 
 	shmem_pseudo_vma_init(&pvma, info, index);
-	page = alloc_page_vma(gfp, &pvma, 0);
+	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
 	shmem_pseudo_vma_destroy(&pvma);
 
-	return page;
+	return folio;
 }
 
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
@@ -1575,7 +1575,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index);
 	else
-		page = shmem_alloc_page(gfp, info, index);
+		page = &shmem_alloc_folio(gfp, info, index)->page;
 	if (page) {
 		__SetPageLocked(page);
 		__SetPageSwapBacked(page);
@@ -1625,7 +1625,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	 * limit chance of success by further cpuset and node constraints.
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
-	newpage = shmem_alloc_page(gfp, info, index);
+	newpage = &shmem_alloc_folio(gfp, info, index)->page;
 	if (!newpage)
 		return -ENOMEM;
 
@@ -2350,7 +2350,6 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 
 	if (!*pagep) {
 		ret = -ENOMEM;
-		page = shmem_alloc_page(gfp, info, pgoff);
 		if (!page)
 			goto out_unacct_blocks;
 

Call vma_alloc_folio() directly instead of alloc_page_vma(). It's a bit
messy in the callers, but they're about to be cleaned up when they get
converted to folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/shmem.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
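
A note for readers following the conversion: the interim
&shmem_alloc_folio(...)->page idiom in the callers works because
struct page is the first member of struct folio, so the folio's
address and its embedded page's address coincide, and a NULL return
from the allocator therefore still trips the callers' !page checks.
The sketch below is a standalone userspace illustration of that
layout trick, not kernel code; the demo_* names and the two
single-field structs are simplified stand-ins for the real types:

	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-ins: as in the kernel, struct page is the
	 * first member of struct folio, so converting a folio pointer
	 * to a page pointer is a no-op address-wise. */
	struct page { unsigned long flags; };
	struct folio { struct page page; };

	/* Stand-in for vma_alloc_folio(gfp, 0, &pvma, 0, false):
	 * returns one zeroed, order-0 "folio", or NULL on failure. */
	static struct folio *demo_alloc_folio(void)
	{
		return calloc(1, sizeof(struct folio));
	}

	/* The interim caller pattern from this patch: allocate a folio
	 * but hand back a page until the caller itself is converted.
	 * Because page sits at offset 0, a NULL folio stays NULL here;
	 * the kernel relies on that same layout. */
	static struct page *demo_alloc_page(void)
	{
		return &demo_alloc_folio()->page;
	}

	int main(void)
	{
		struct page *page = demo_alloc_page();

		if (!page)	/* mirrors the "if (!page)" checks above */
			return 1;
		printf("page at %p\n", (void *)page);
		free(page);	/* same address as the underlying folio */
		return 0;
	}

Build with something like "cc -Wall demo.c" and run; the printed
address is both the folio and its first page, which is why the messy
caller-side cast can disappear with no behaviour change once the
callers take folios directly.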