@@ -1522,7 +1522,7 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
return result;
}

-static struct page *shmem_alloc_hugepage(gfp_t gfp,
+static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
@@ -1540,7 +1540,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_destroy(&pvma);
if (!folio)
count_vm_event(THP_FILE_FALLBACK);
- return &folio->page;
+ return folio;
}

static struct folio *shmem_alloc_folio(gfp_t gfp,
@@ -1561,7 +1561,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
pgoff_t index, bool huge)
{
struct shmem_inode_info *info = SHMEM_I(inode);
- struct page *page;
+ struct folio *folio;
int nr;
int err = -ENOSPC;

@@ -1573,13 +1573,13 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
goto failed;

if (huge)
- page = shmem_alloc_hugepage(gfp, info, index);
+ folio = shmem_alloc_hugefolio(gfp, info, index);
else
- page = &shmem_alloc_folio(gfp, info, index)->page;
- if (page) {
- __SetPageLocked(page);
- __SetPageSwapBacked(page);
- return page;
+ folio = shmem_alloc_folio(gfp, info, index);
+ if (folio) {
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
+ return &folio->page;
}

err = -ENOMEM;
Convert shmem_alloc_hugepage() to return the folio that it uses and use
a folio throughout shmem_alloc_and_acct_page().  Continue to return a
page from shmem_alloc_and_acct_page() for now.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/shmem.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
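
A minimal sketch of the conversion pattern the hunks above apply (not
part of the patch; the function name alloc_example is made up for
illustration, and it assumes the kernel's folio API, in which a
struct folio overlays its head struct page):

/*
 * Illustrative only: work with a folio internally, but keep the old
 * struct page calling convention at the boundary for now.
 */
static struct page *alloc_example(gfp_t gfp)
{
	struct folio *folio = folio_alloc(gfp, 0);	/* order-0 */

	if (!folio)
		return NULL;

	/* Folio flag helpers replace the old page-flag macros: */
	__folio_set_locked(folio);	/* was __SetPageLocked()     */
	__folio_set_swapbacked(folio);	/* was __SetPageSwapBacked() */

	/*
	 * &folio->page is the folio's head page, so callers that
	 * still expect a struct page * need no change yet.
	 */
	return &folio->page;
}

Returning &folio->page rather than converting every caller at once
keeps each patch in the series small and bisectable; a later patch can
push the folio type out through shmem_alloc_and_acct_page()'s callers.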