
[5/5] arm64/mmu: simplify logic around crash kernel mapping in map_mem()

Message ID 20220819041156.873873-6-rppt@kernel.org (mailing list archive)
State New
Series arm64/mm: remap crash kernel with base pages even if rodata_full disabled

Commit Message

Mike Rapoport Aug. 19, 2022, 4:11 a.m. UTC
From: Mike Rapoport <rppt@linux.ibm.com>

The checks for the crashkernel command line parameter and for the presence of
CONFIG_ZONE_DMA[32] in mmu::map_mem() are not necessary: crashk_res.end can
only be set by the time map_mem() runs if reserve_crashkernel() was called
from arm64_memblock_init() and a valid crashkernel parameter was present on
the command line.

Keep only the check that crashk_res.end is non-zero to decide whether the
crash kernel memory should be mapped with base pages.
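
For reference, here is a simplified sketch of the reserve_crashkernel() path
that fills in crashk_res (paraphrased for illustration, not the exact code in
arch/arm64/mm/init.c): the function returns early unless a valid crashkernel=
option was parsed and the reservation succeeded, so a non-zero crashk_res.end
already implies both conditions that the removed checks tried to capture.

static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	/* Parse crashkernel=<size>[@offset] from the kernel command line. */
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	if (ret || !crash_size)
		return;		/* no usable crashkernel= parameter */

	/* ... memblock allocation and reservation elided ... */

	/* Only reached on success: this is what map_mem() later tests. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}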

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 arch/arm64/mm/mmu.c | 44 ++++++++++++--------------------------------
 1 file changed, 12 insertions(+), 32 deletions(-)

Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 83f2f18f7f34..fa23cfa6b772 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -502,21 +502,6 @@  void __init mark_linear_text_alias_ro(void)
 			    PAGE_KERNEL_RO);
 }
 
-static bool crash_mem_map __initdata;
-
-static int __init enable_crash_mem_map(char *arg)
-{
-	/*
-	 * Proper parameter parsing is done by reserve_crashkernel(). We only
-	 * need to know if the linear map has to avoid block mappings so that
-	 * the crashkernel reservations can be unmapped later.
-	 */
-	crash_mem_map = true;
-
-	return 0;
-}
-early_param("crashkernel", enable_crash_mem_map);
-
 static void __init map_mem(pgd_t *pgdp)
 {
 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -547,11 +532,9 @@  static void __init map_mem(pgd_t *pgdp)
 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
 
 #ifdef CONFIG_KEXEC_CORE
-	if (crash_mem_map && !have_zone_dma()) {
-		if (crashk_res.end)
-			memblock_mark_nomap(crashk_res.start,
-			    resource_size(&crashk_res));
-	}
+	if (crashk_res.end)
+		memblock_mark_nomap(crashk_res.start,
+				    resource_size(&crashk_res));
 #endif
 
 	/* map all the memory banks */
@@ -582,20 +565,17 @@  static void __init map_mem(pgd_t *pgdp)
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
 
 	/*
-	 * Use page-level mappings here so that we can shrink the region
-	 * in page granularity and put back unused memory to buddy system
-	 * through /sys/kernel/kexec_crash_size interface.
+	 * Use page-level mappings here so that we can protect crash kernel
+	 * memory to allow post-mortem analysis when things go awry.
 	 */
 #ifdef CONFIG_KEXEC_CORE
-	if (crash_mem_map && !have_zone_dma()) {
-		if (crashk_res.end) {
-			__map_memblock(pgdp, crashk_res.start,
-				       crashk_res.end + 1,
-				       PAGE_KERNEL,
-				       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
-			memblock_clear_nomap(crashk_res.start,
-					     resource_size(&crashk_res));
-		}
+	if (crashk_res.end) {
+		__map_memblock(pgdp, crashk_res.start,
+			       crashk_res.end + 1,
+			       PAGE_KERNEL,
+			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+		memblock_clear_nomap(crashk_res.start,
+				     resource_size(&crashk_res));
 	}
 #endif
 }