[v2,14/16] mm/mempolicy: Add alloc_frozen_pages()

Message ID: 20220809171854.3725722-15-willy@infradead.org (mailing list archive)
State: New
Series: Allocate and free frozen pages

Commit Message

Matthew Wilcox Aug. 9, 2022, 5:18 p.m. UTC
Provide an interface to allocate pages from the page allocator without
incrementing their refcount.  This saves an atomic operation on free,
which may be beneficial to some users (e.g. slab).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
 mm/internal.h  |  9 ++++++++
 mm/mempolicy.c | 61 +++++++++++++++++++++++++++++++-------------------
 2 files changed, 47 insertions(+), 23 deletions(-)
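
As an illustration of the intended usage (not part of the patch; the example_*
helpers below are hypothetical, while alloc_frozen_pages(), free_frozen_pages()
and set_page_refcounted() are the mm/internal.h interfaces this series
provides), an mm-internal caller that manages object lifetimes itself could do
something like:

	/* Allocate backing pages without taking a reference. */
	static struct page *example_alloc_backing(gfp_t gfp, unsigned int order)
	{
		/* The page comes back with page_ref_count() == 0. */
		return alloc_frozen_pages(gfp, order);
	}

	/* Free them again; no atomic refcount decrement is needed. */
	static void example_free_backing(struct page *page, unsigned int order)
	{
		free_frozen_pages(page, order);
	}

	/*
	 * If a page later has to be handed to code that expects a normal
	 * reference, the caller sets the refcount to one first, exactly as
	 * vma_alloc_folio() and alloc_pages() do in the patch below.
	 */
	static void example_publish(struct page *page)
	{
		set_page_refcounted(page);
	}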

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 7e6079216a17..6f02bc32b406 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -367,6 +367,15 @@  struct page *__alloc_frozen_pages(gfp_t, unsigned int order, int nid,
 void free_frozen_pages(struct page *, unsigned int order);
 void free_unref_page_list(struct list_head *list);
 
+#ifdef CONFIG_NUMA
+struct page *alloc_frozen_pages(gfp_t, unsigned int order);
+#else
+static inline struct page *alloc_frozen_pages(gfp_t gfp, unsigned int order)
+{
+	return __alloc_frozen_pages(gfp, order, numa_node_id(), NULL);
+}
+#endif
+
 extern void zone_pcp_update(struct zone *zone, int cpu_online);
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b73d3248d976..09ecc499d5fc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2100,7 +2100,7 @@  static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid, NULL);
+	page = __alloc_frozen_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2126,9 +2126,9 @@  static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
 	 */
 	preferred_gfp = gfp | __GFP_NOWARN;
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
+	page = __alloc_frozen_pages(preferred_gfp, order, nid, &pol->nodes);
 	if (!page)
-		page = __alloc_pages(gfp, order, nid, NULL);
+		page = __alloc_frozen_pages(gfp, order, nid, NULL);
 
 	return page;
 }
@@ -2167,8 +2167,11 @@  struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
-		if (page && order > 1)
-			prep_transhuge_page(page);
+		if (page) {
+			set_page_refcounted(page);
+			if (order > 1)
+				prep_transhuge_page(page);
+		}
 		folio = (struct folio *)page;
 		goto out;
 	}
@@ -2180,8 +2183,11 @@  struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
-		if (page && order > 1)
-			prep_transhuge_page(page);
+		if (page) {
+			set_page_refcounted(page);
+			if (order > 1)
+				prep_transhuge_page(page);
+		}
 		folio = (struct folio *)page;
 		goto out;
 	}
@@ -2235,21 +2241,7 @@  struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL(vma_alloc_folio);
 
-/**
- * alloc_pages - Allocate pages.
- * @gfp: GFP flags.
- * @order: Power of two of number of pages to allocate.
- *
- * Allocate 1 << @order contiguous pages.  The physical address of the
- * first page is naturally aligned (eg an order-3 allocation will be aligned
- * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
- * process is honoured when in process context.
- *
- * Context: Can be called from any context, providing the appropriate GFP
- * flags are used.
- * Return: The page on success or NULL if allocation fails.
- */
-struct page *alloc_pages(gfp_t gfp, unsigned order)
+struct page *alloc_frozen_pages(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = &default_policy;
 	struct page *page;
@@ -2267,12 +2259,35 @@  struct page *alloc_pages(gfp_t gfp, unsigned order)
 		page = alloc_pages_preferred_many(gfp, order,
 				  policy_node(gfp, pol, numa_node_id()), pol);
 	else
-		page = __alloc_pages(gfp, order,
+		page = __alloc_frozen_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
 	return page;
 }
+
+/**
+ * alloc_pages - Allocate pages.
+ * @gfp: GFP flags.
+ * @order: Power of two of number of pages to allocate.
+ *
+ * Allocate 1 << @order contiguous pages.  The physical address of the
+ * first page is naturally aligned (eg an order-3 allocation will be aligned
+ * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
+ * process is honoured when in process context.
+ *
+ * Context: Can be called from any context, providing the appropriate GFP
+ * flags are used.
+ * Return: The page on success or NULL if allocation fails.
+ */
+struct page *alloc_pages(gfp_t gfp, unsigned order)
+{
+	struct page *page = alloc_frozen_pages(gfp, order);
+
+	if (page)
+		set_page_refcounted(page);
+	return page;
+}
 EXPORT_SYMBOL(alloc_pages);
 
 struct folio *folio_alloc(gfp_t gfp, unsigned order)