[v2,06/15] mm: Pass order to alloc_pages_vma in GFP flags

Message ID: 20190510135038.17129-7-willy@infradead.org (mailing list archive)
State: New, archived
Series: Remove 'order' argument from many mm functions

Commit Message

Matthew Wilcox (Oracle) May 10, 2019, 1:50 p.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Matches the change to the __alloc_pages_nodemask API.
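
Callers now fold the allocation order into the gfp mask with
__GFP_ORDER() (or a shorthand such as __GFP_PMD for PMD-order
allocations, as in the shmem hunk below), and alloc_pages_vma()
recovers it internally via gfp_order().  A rough before/after sketch
of the calling convention; the __GFP_ORDER()/gfp_order() helpers
themselves come from earlier patches in this series:

	/* Before: the order is a separate argument. */
	page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0,
			numa_node_id(), true);

	/* After: the order travels inside the gfp mask. */
	page = alloc_pages_vma(gfp | __GFP_PMD, &pvma, 0,
			numa_node_id(), true);

	/* Or, for an arbitrary order: */
	page = alloc_pages_vma(gfp | __GFP_ORDER(order), vma, addr,
			node, hugepage);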

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/gfp.h | 20 ++++++++++----------
 mm/mempolicy.c      | 15 +++++++--------
 mm/shmem.c          |  5 +++--
 3 files changed, 20 insertions(+), 20 deletions(-)
Patch

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 94ba8a6172e4..6133f77abc91 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -518,24 +518,24 @@  alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	return alloc_pages_current(gfp_mask | __GFP_ORDER(order));
 }
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-			struct vm_area_struct *vma, unsigned long addr,
-			int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+extern struct page *alloc_pages_vma(gfp_t gfp, struct vm_area_struct *vma,
+		unsigned long addr, int node, bool hugepage);
+#define alloc_hugepage_vma(gfp, vma, addr, order) \
+	alloc_pages_vma(gfp | __GFP_ORDER(order), vma, addr, numa_node_id(), \
+			true)
 #else
 #define alloc_pages(gfp_mask, order) \
-		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
-	alloc_pages(gfp_mask, order)
+	alloc_pages_node(numa_node_id(), gfp_mask, order)
+#define alloc_pages_vma(gfp, vma, addr, node, false) \
+	alloc_pages(gfp, 0)
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
 	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)			\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+	alloc_pages_vma(gfp_mask, vma, addr, numa_node_id(), false)
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+	alloc_pages_vma(gfp_mask, vma, addr, node, false)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eec0b9c21962..e81d4a94878b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2032,7 +2032,6 @@  static struct page *alloc_page_interleave(gfp_t gfp, unsigned nid)
  *      %GFP_FS      allocation should not call back into a file system.
  *      %GFP_ATOMIC  don't sleep.
  *
- *	@order:Order of the GFP allocation.
  * 	@vma:  Pointer to VMA or NULL if not available.
  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
  *	@node: Which node to prefer for allocation (modulo policy).
@@ -2046,8 +2045,8 @@  static struct page *alloc_page_interleave(gfp_t gfp, unsigned nid)
  *	NULL when no page can be allocated.
  */
 struct page *
-alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr, int node, bool hugepage)
+alloc_pages_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr,
+		int node, bool hugepage)
 {
 	struct mempolicy *pol;
 	struct page *page;
@@ -2059,9 +2058,10 @@  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 	if (pol->mode == MPOL_INTERLEAVE) {
 		unsigned nid;
 
-		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
+		nid = interleave_nid(pol, vma, addr,
+				PAGE_SHIFT + gfp_order(gfp));
 		mpol_cond_put(pol);
-		page = alloc_page_interleave(gfp | __GFP_ORDER(order), nid);
+		page = alloc_page_interleave(gfp, nid);
 		goto out;
 	}
 
@@ -2085,15 +2085,14 @@  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		if (!nmask || node_isset(hpage_node, *nmask)) {
 			mpol_cond_put(pol);
 			page = __alloc_pages_node(hpage_node,
-						gfp | __GFP_THISNODE, order);
+						gfp | __GFP_THISNODE, 0);
 			goto out;
 		}
 	}
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp | __GFP_ORDER(order), preferred_nid,
-			nmask);
+	page = __alloc_pages_nodemask(gfp, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
diff --git a/mm/shmem.c b/mm/shmem.c
index 1bb3b8dc8bb2..fdbab5dbf1fd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1463,8 +1463,9 @@  static struct page *shmem_alloc_hugepage(gfp_t gfp,
 		return NULL;
 
 	shmem_pseudo_vma_init(&pvma, info, hindex);
-	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
-			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
+	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY |
+					__GFP_NOWARN | __GFP_PMD,
+			&pvma, 0, numa_node_id(), true);
 	shmem_pseudo_vma_destroy(&pvma);
 	if (page)
 		prep_transhuge_page(page);