[23/27] mm/cma: simplify zone intersection check

Message ID 20250127232207.3888640-24-fvdl@google.com
State New
Series hugetlb/CMA improvements for large systems

Commit Message

Frank van der Linden Jan. 27, 2025, 11:22 p.m. UTC
cma_activate_area() walks all pages in the area, checking each
page's zone individually to see whether the area resides in
more than one zone.

Make this a little more efficient by using the recently
introduced pfn_range_intersects_zones() function. Store the
NUMA node id (if any) in the cma structure to facilitate this.

Signed-off-by: Frank van der Linden <fvdl@google.com>
---
 mm/cma.c | 13 ++++++-------
 mm/cma.h |  2 ++
 2 files changed, 8 insertions(+), 7 deletions(-)
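Note: pfn_range_intersects_zones() itself is added earlier in this
series; the sketch below is only an illustration of the idea, with a
hypothetical name and body (not the actual helper from that patch).
The check amounts to counting how many zones the pfn range overlaps,
restricted to one node when a node id is given, and reporting whether
it is more than one.

/*
 * Illustrative sketch only: report whether the pfn range
 * [start_pfn, start_pfn + nr_pages) overlaps more than one zone,
 * optionally restricted to the zones of a single NUMA node.
 */
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/numa.h>

static bool __init pfn_range_spans_multiple_zones(int nid,
						  unsigned long start_pfn,
						  unsigned long nr_pages)
{
	struct zone *zone;
	int hits = 0;

	for_each_zone(zone) {
		/* Skip zones on other nodes if a node was specified. */
		if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid)
			continue;
		/* Compare against the zone span, not individual pages. */
		if (zone_intersects(zone, start_pfn, nr_pages))
			hits++;
	}

	return hits > 1;
}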

Patch

diff --git a/mm/cma.c b/mm/cma.c
index 1704d5be6a07..6ad631c9fdca 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -103,7 +103,6 @@  static void __init cma_activate_area(struct cma *cma)
 {
 	unsigned long pfn, base_pfn;
 	int allocrange, r;
-	struct zone *zone;
 	struct cma_memrange *cmr;
 
 	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
@@ -124,12 +123,8 @@  static void __init cma_activate_area(struct cma *cma)
 		 * CMA resv range to be in the same zone.
 		 */
 		WARN_ON_ONCE(!pfn_valid(base_pfn));
-		zone = page_zone(pfn_to_page(base_pfn));
-		for (pfn = base_pfn + 1; pfn < base_pfn + cmr->count; pfn++) {
-			WARN_ON_ONCE(!pfn_valid(pfn));
-			if (page_zone(pfn_to_page(pfn)) != zone)
-				goto cleanup;
-		}
+		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count))
+			goto cleanup;
 
 		for (pfn = base_pfn; pfn < base_pfn + cmr->count;
 		     pfn += pageblock_nr_pages)
@@ -261,6 +256,7 @@  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	cma->ranges[0].base_pfn = PFN_DOWN(base);
 	cma->ranges[0].count = cma->count;
 	cma->nranges = 1;
+	cma->nid = NUMA_NO_NODE;
 
 	*res_cma = cma;
 
@@ -497,6 +493,7 @@  int __init cma_declare_contiguous_multi(phys_addr_t total_size,
 	}
 
 	cma->nranges = nr;
+	cma->nid = nid;
 	*res_cma = cma;
 
 out:
@@ -684,6 +681,8 @@  static int __init __cma_declare_contiguous_nid(phys_addr_t base,
 	if (ret)
 		memblock_phys_free(base, size);
 
+	(*res_cma)->nid = nid;
+
 	return ret;
 }
 
diff --git a/mm/cma.h b/mm/cma.h
index 601af7cdb495..b70a6c763f7d 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -48,6 +48,8 @@  struct cma {
 	struct cma_kobject *cma_kobj;
 #endif
 	bool reserve_pages_on_error;
+	/* NUMA node (NUMA_NO_NODE if unspecified) */
+	int nid;
 };
 
 extern struct cma cma_areas[MAX_CMA_AREAS];