diff mbox series

[4/4] mm/mempolicy: Convert alloc_pages_preferred_many() to return a folio

Message ID 20240805163120.5971-4-aruna.ramakrishna@oracle.com (mailing list archive)
State New
Headers show
Series [1/4] mm/mempolicy: Use folio_alloc_mpol_noprof() in alloc_pages_noprof() | expand

Commit Message

Aruna Ramakrishna Aug. 5, 2024, 4:31 p.m. UTC
There is only one caller of alloc_pages_preferred_many(), and it
already expects a folio. Rename the function to
folio_alloc_preferred_many() and convert its body to allocate and
return a folio directly, letting the caller drop its
page_rmappable_folio() conversion.

Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
---
 mm/mempolicy.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)
diff mbox series

Patch

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9be32c3bfff2..33074ffd59fe 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2185,10 +2185,10 @@  bool mempolicy_in_oom_domain(struct task_struct *tsk,
 	return ret;
 }
 
-static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
-						int nid, nodemask_t *nodemask)
+static struct folio *folio_alloc_preferred_many(gfp_t gfp, unsigned int order,
+		int nid, nodemask_t *nodemask)
 {
-	struct page *page;
+	struct folio *folio;
 	gfp_t preferred_gfp;
 
 	/*
@@ -2199,11 +2199,11 @@  static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
 	 */
 	preferred_gfp = gfp | __GFP_NOWARN;
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-	page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
-	if (!page)
-		page = __alloc_pages_noprof(gfp, order, nid, NULL);
+	folio = __folio_alloc_noprof(preferred_gfp, order, nid, nodemask);
+	if (!folio)
+		folio = __folio_alloc_noprof(gfp, order, nid, NULL);
 
-	return page;
+	return folio;
 }
 
 /**
@@ -2226,9 +2226,7 @@  struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
 	nodemask = policy_nodemask(gfp, pol, ilx, &nid);
 
 	if (pol->mode == MPOL_PREFERRED_MANY)
-		return page_rmappable_folio(
-				alloc_pages_preferred_many(gfp, order,
-					nid, nodemask));
+		return folio_alloc_preferred_many(gfp, order, nid, nodemask);
 
 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
 	    /* filter "hugepage" allocation, unless from alloc_pages() */