
[v2,4/6] shmem: add order parameter support to shmem_alloc_folio

Message ID 20230919135536.2165715-5-da.gomez@samsung.com (mailing list archive)
State New, archived
Series: [v2,1/6] shmem: drop BLOCKS_PER_PAGE macro

Commit Message

Daniel Gomez Sept. 19, 2023, 1:55 p.m. UTC
In preparation for high order folio support in the write path, add an
order parameter when allocating a folio. The order is used on the
write path when huge page support is not enabled, or when it is
enabled but the huge page allocation fails; the fallback allocation
then takes advantage of it as well.

Use order 0 for the non-write paths, such as reads or swap in, as
these currently lack high order folio support.
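
For context, a minimal sketch (not part of this patch) of how a
write-path caller might use the new parameter, retrying with order 0
when the high order allocation fails; shmem_alloc_write_folio() is a
hypothetical name, only shmem_alloc_folio() below is real:

	/*
	 * Hypothetical caller sketch: write paths request a high order
	 * and fall back to a single page on failure, while reads and
	 * swap in keep passing order 0.
	 */
	static struct folio *shmem_alloc_write_folio(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index,
			unsigned int order)
	{
		struct folio *folio;

		folio = shmem_alloc_folio(gfp, info, index, order);
		if (!folio && order > 0)
			folio = shmem_alloc_folio(gfp, info, index, 0);

		return folio;
	}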

Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
---
 mm/shmem.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index d41ee5983fd4..66d94207b40c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1667,20 +1667,21 @@  static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
 }
 
 static struct folio *shmem_alloc_folio(gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
+			struct shmem_inode_info *info, pgoff_t index,
+			unsigned int order)
 {
 	struct vm_area_struct pvma;
 	struct folio *folio;
 
 	shmem_pseudo_vma_init(&pvma, info, index);
-	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
+	folio = vma_alloc_folio(gfp, order, &pvma, 0, false);
 	shmem_pseudo_vma_destroy(&pvma);
 
 	return folio;
 }
 
 static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
-		pgoff_t index, bool huge)
+		pgoff_t index, bool huge, unsigned int *order)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct folio *folio;
@@ -1689,7 +1690,7 @@  static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
 
 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 		huge = false;
-	nr = huge ? HPAGE_PMD_NR : 1;
+	nr = huge ? HPAGE_PMD_NR : 1U << *order;
 
 	err = shmem_inode_acct_block(inode, nr);
 	if (err)
@@ -1698,7 +1699,7 @@  static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
 	if (huge)
 		folio = shmem_alloc_hugefolio(gfp, info, index);
 	else
-		folio = shmem_alloc_folio(gfp, info, index);
+		folio = shmem_alloc_folio(gfp, info, index, *order);
 	if (folio) {
 		__folio_set_locked(folio);
 		__folio_set_swapbacked(folio);
@@ -1748,7 +1749,7 @@  static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
 	VM_BUG_ON_FOLIO(folio_test_large(old), old);
-	new = shmem_alloc_folio(gfp, info, index);
+	new = shmem_alloc_folio(gfp, info, index, 0);
 	if (!new)
 		return -ENOMEM;
 
@@ -1959,6 +1960,7 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	int error;
 	int once = 0;
 	int alloced = 0;
+	unsigned int order = 0;
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
@@ -2034,10 +2036,12 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 
 	huge_gfp = vma_thp_gfp_mask(vma);
 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
-	folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
+	folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true,
+					   &order);
 	if (IS_ERR(folio)) {
 alloc_nohuge:
-		folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
+		folio = shmem_alloc_and_acct_folio(gfp, inode, index, false,
+						   &order);
 	}
 	if (IS_ERR(folio)) {
 		int retry = 5;
@@ -2600,7 +2604,7 @@  int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 
 	if (!*foliop) {
 		ret = -ENOMEM;
-		folio = shmem_alloc_folio(gfp, info, pgoff);
+		folio = shmem_alloc_folio(gfp, info, pgoff, 0);
 		if (!folio)
 			goto out_unacct_blocks;
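
A note on the accounting hunk above: in the non-huge case,
shmem_inode_acct_block() is now charged 1U << *order pages instead of
always one. Illustrative values only (HPAGE_PMD_NR taken as 512, i.e.
x86-64 with 4K pages):

	nr = huge ? HPAGE_PMD_NR : 1U << *order;
	/* order 0 -> nr = 1, order 2 -> nr = 4, huge -> nr = 512 */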