
[v2,02/17] memblock: Declare scratch memory as CMA

Message ID 20231222193607.15474-3-graf@amazon.com (mailing list archive)
State Superseded
Series kexec: Allow preservation of ftrace buffers

Commit Message

Alexander Graf Dec. 22, 2023, 7:35 p.m. UTC
When we finish populating our memory, we don't want to lose the scratch
region as memory we can use for useful data. To do that, we mark it as
CMA memory. That means that any allocation within it only happens with
movable memory, which we can then happily discard for the next kexec.

That way we no longer lose the scratch region's memory to allocations
after boot.

Signed-off-by: Alexander Graf <graf@amazon.com>

---

v1 -> v2:

  - test bot warning fix
---
 mm/memblock.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)
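
The behavior the patch relies on is that the page allocator only serves
movable (__GFP_MOVABLE) requests from MIGRATE_CMA pageblocks; unmovable
kernel allocations are steered elsewhere. A minimal illustrative sketch,
not part of the patch (scratch_alloc_demo() is a made-up name):

#include <linux/gfp.h>

/*
 * Illustrative only: once the scratch pageblocks are MIGRATE_CMA,
 * movable allocations may land there while unmovable ones cannot,
 * so the region can later be vacated wholesale for the next kexec.
 */
static void scratch_alloc_demo(void)
{
	/* May be satisfied from a MIGRATE_CMA (scratch) pageblock. */
	struct page *movable = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);

	/* Never placed in a MIGRATE_CMA pageblock: unmovable. */
	struct page *pinned = alloc_pages(GFP_KERNEL, 0);

	if (movable)
		__free_pages(movable, 0);
	if (pinned)
		__free_pages(pinned, 0);
}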

Comments

Stanislav Kinsburskii Jan. 1, 2024, 3:01 a.m. UTC | #1
On Fri, Dec 22, 2023 at 07:35:52PM +0000, Alexander Graf wrote:
> When we finish populating our memory, we don't want to lose the scratch
> region as memory we can use for useful data. To do that, we mark it as
> CMA memory. That means that any allocation within it only happens with
> movable memory, which we can then happily discard for the next kexec.
> 
> That way we no longer lose the scratch region's memory to allocations
> after boot.
> 
> Signed-off-by: Alexander Graf <graf@amazon.com>
> 
> ---
> 
> v1 -> v2:
> 
>   - test bot warning fix
> ---
>  mm/memblock.c | 30 ++++++++++++++++++++++++++----
>  1 file changed, 26 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memblock.c b/mm/memblock.c
> index e89e6c8f9d75..3700c2c1a96d 100644
> --- a/mm/memblock.c
> +++ b/mm/memblock.c
> @@ -16,6 +16,7 @@
>  #include <linux/kmemleak.h>
>  #include <linux/seq_file.h>
>  #include <linux/memblock.h>
> +#include <linux/page-isolation.h>
>  
>  #include <asm/sections.h>
>  #include <linux/io.h>
> @@ -1100,10 +1101,6 @@ static bool should_skip_region(struct memblock_type *type,
>  	if ((flags & MEMBLOCK_SCRATCH) && !memblock_is_scratch(m))
>  		return true;
>  
> -	/* Leave scratch memory alone after scratch-only phase */
> -	if (!(flags & MEMBLOCK_SCRATCH) && memblock_is_scratch(m))
> -		return true;
> -
>  	return false;
>  }
>  
> @@ -2153,6 +2150,20 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
>  	}
>  }
>  
> +#ifdef CONFIG_MEMBLOCK_SCRATCH
> +static void reserve_scratch_mem(phys_addr_t start, phys_addr_t end)

nit: the function name doesn't look right, as nothing in the function
is specific to either reservation or scratch memory.
Perhaps something like "set_mem_cma_type" would be a better fit.
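
For illustration, the helper under the suggested name could look like
this (a sketch of the reviewer's proposal, not code from the series):

static void __init set_mem_cma_type(phys_addr_t start, phys_addr_t end)
{
	ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
	ulong end_pfn = pageblock_align(PFN_UP(end));
	ulong pfn;

	/* Same body as reserve_scratch_mem(); only the name changes. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
		set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_CMA);
}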

> +{
> +	ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
> +	ulong end_pfn = pageblock_align(PFN_UP(end));
> +	ulong pfn;
> +
> +	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
> +		/* Mark as CMA to prevent kernel allocations in it */

nit: the comment above looks irrelevant/redundant.

> +		set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_CMA);
> +	}
> +}
> +#endif
> +
>  static unsigned long __init __free_memory_core(phys_addr_t start,
>  				 phys_addr_t end)
>  {
> @@ -2214,6 +2225,17 @@ static unsigned long __init free_low_memory_core_early(void)
>  
>  	memmap_init_reserved_pages();
>  
> +#ifdef CONFIG_MEMBLOCK_SCRATCH
> +	/*
> +	 * Mark scratch mem as CMA before we return it. That way we ensure that
> +	 * no kernel allocations happen on it. That means we can reuse it as
> +	 * scratch memory again later.
> +	 */
> +	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
> +			     MEMBLOCK_SCRATCH, &start, &end, NULL)
> +		reserve_scratch_mem(start, end);
> +#endif
> +
>  	/*
>  	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
>  	 *  because in some case like Node0 doesn't have RAM installed
> -- 
> 2.40.1
> 
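For context, the reuse the commit message alludes to can be sketched
with the kernel's contiguous-allocation API: because every page in the
scratch region is movable, alloc_contig_range() can migrate current
users out and hand the range back. Illustrative only
(reclaim_scratch_range() is a made-up name), not code from this series:

#include <linux/gfp.h>

static int reclaim_scratch_range(unsigned long start_pfn,
				 unsigned long end_pfn)
{
	int ret;

	/* Migrate movable users away and claim the whole range. */
	ret = alloc_contig_range(start_pfn, end_pfn, MIGRATE_CMA,
				 GFP_KERNEL);
	if (ret)
		return ret;

	/* ... reuse [start_pfn, end_pfn) as scratch for the next kexec ... */

	free_contig_range(start_pfn, end_pfn - start_pfn);
	return 0;
}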

Patch

diff --git a/mm/memblock.c b/mm/memblock.c
index e89e6c8f9d75..3700c2c1a96d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -16,6 +16,7 @@
 #include <linux/kmemleak.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
+#include <linux/page-isolation.h>
 
 #include <asm/sections.h>
 #include <linux/io.h>
@@ -1100,10 +1101,6 @@ static bool should_skip_region(struct memblock_type *type,
 	if ((flags & MEMBLOCK_SCRATCH) && !memblock_is_scratch(m))
 		return true;
 
-	/* Leave scratch memory alone after scratch-only phase */
-	if (!(flags & MEMBLOCK_SCRATCH) && memblock_is_scratch(m))
-		return true;
-
 	return false;
 }
 
@@ -2153,6 +2150,20 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
 	}
 }
 
+#ifdef CONFIG_MEMBLOCK_SCRATCH
+static void reserve_scratch_mem(phys_addr_t start, phys_addr_t end)
+{
+	ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
+	ulong end_pfn = pageblock_align(PFN_UP(end));
+	ulong pfn;
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+		/* Mark as CMA to prevent kernel allocations in it */
+		set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_CMA);
+	}
+}
+#endif
+
 static unsigned long __init __free_memory_core(phys_addr_t start,
 				 phys_addr_t end)
 {
@@ -2214,6 +2225,17 @@ static unsigned long __init free_low_memory_core_early(void)
 
 	memmap_init_reserved_pages();
 
+#ifdef CONFIG_MEMBLOCK_SCRATCH
+	/*
+	 * Mark scratch mem as CMA before we return it. That way we ensure that
+	 * no kernel allocations happen on it. That means we can reuse it as
+	 * scratch memory again later.
+	 */
+	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+			     MEMBLOCK_SCRATCH, &start, &end, NULL)
+		reserve_scratch_mem(start, end);
+#endif
+
 	/*
 	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
 	 *  because in some case like Node0 doesn't have RAM installed