[v22,4/9] arm64: kdump: Don't force page-level mappings for memory above 4G

Message ID: 20220414115720.1887-5-thunder.leizhen@huawei.com (mailing list archive)
State: New, archived
Series: support reserving crashkernel above 4G on arm64 kdump

Commit Message

Zhen Lei April 14, 2022, 11:57 a.m. UTC
If the crashkernel reservation is deferred, its boundaries are not known
when the linear mapping is created. Its upper limit is fixed, however:
the reservation cannot lie above 4G. Therefore, unless otherwise required,
block mappings should still be used for memory above 4G to improve
performance.
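
For illustration, the splitting logic this patch implements can be sketched
as standalone C. This is not kernel code: map_range() is a hypothetical
stand-in for __map_memblock(), and the force_pages flag models the
NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS extra flags.

#include <stdint.h>
#include <stdio.h>

#define SZ_4G	(1ULL << 32)

/* Hypothetical stand-in for __map_memblock(): report what would be
 * mapped and at which granularity. Intervals are half-open here. */
static void map_range(uint64_t start, uint64_t end, int force_pages)
{
	printf("map [%#llx, %#llx) as %s mappings\n",
	       (unsigned long long)start, (unsigned long long)end,
	       force_pages ? "page-level" : "block");
}

/* Memory below 4G may later have the crashkernel carved out of it,
 * so it must be mapped with pages; memory above 4G never will, so
 * block mappings are safe there. */
static void map_with_split(uint64_t start, uint64_t end)
{
	if (end <= SZ_4G) {
		map_range(start, end, 1);	/* entirely below 4G */
		return;
	}
	if (start < SZ_4G) {
		map_range(start, SZ_4G, 1);	/* low part: pages */
		start = SZ_4G;
	}
	map_range(start, end, 0);		/* high part: blocks */
}

int main(void)
{
	map_with_split(0x80000000ULL, 0x180000000ULL);	/* crosses 4G */
	return 0;
}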

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
 arch/arm64/mm/mmu.c | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

Comments

Catalin Marinas April 26, 2022, 2:26 p.m. UTC | #1
On Thu, Apr 14, 2022 at 07:57:15PM +0800, Zhen Lei wrote:
> @@ -540,13 +540,31 @@ static void __init map_mem(pgd_t *pgdp)
>  	for_each_mem_range(i, &start, &end) {
>  		if (start >= end)
>  			break;
> +
> +#ifdef CONFIG_KEXEC_CORE
> +		if (eflags && (end >= SZ_4G)) {
> +			/*
> +			 * The memory block crosses the 4G boundary.
> +			 * Forcibly use page-level mappings for memory under 4G.
> +			 */
> +			if (start < SZ_4G) {
> +				__map_memblock(pgdp, start, SZ_4G - 1,
> +					       pgprot_tagged(PAGE_KERNEL), flags | eflags);
> +				start = SZ_4G;
> +			}
> +
> +			/* Page-level mappings are not mandatory for memory above 4G */
> +			eflags = 0;
> +		}
> +#endif

That's a bit tricky if a SoC has all RAM above 4G. IIRC AMD Seattle had
this layout. See max_zone_phys() for how we deal with this, basically
extending ZONE_DMA to the whole range if RAM starts above 4GB. In that
case, crashkernel reservation would fall in the range above 4GB.

BTW, we changed the max_zone_phys() logic with commit 791ab8b2e3db
("arm64: Ignore any DMA offsets in the max_zone_phys() calculation").
Zhen Lei April 27, 2022, 7:12 a.m. UTC | #2
On 2022/4/26 22:26, Catalin Marinas wrote:
> On Thu, Apr 14, 2022 at 07:57:15PM +0800, Zhen Lei wrote:
>> @@ -540,13 +540,31 @@ static void __init map_mem(pgd_t *pgdp)
>>  	for_each_mem_range(i, &start, &end) {
>>  		if (start >= end)
>>  			break;
>> +
>> +#ifdef CONFIG_KEXEC_CORE
>> +		if (eflags && (end >= SZ_4G)) {
>> +			/*
>> +			 * The memory block crosses the 4G boundary.
>> +			 * Forcibly use page-level mappings for memory under 4G.
>> +			 */
>> +			if (start < SZ_4G) {
>> +				__map_memblock(pgdp, start, SZ_4G - 1,
>> +					       pgprot_tagged(PAGE_KERNEL), flags | eflags);
>> +				start = SZ_4G;
>> +			}
>> +
>> +			/* Page-level mappings are not mandatory for memory above 4G */
>> +			eflags = 0;
>> +		}
>> +#endif
> 
> That's a bit tricky if a SoC has all RAM above 4G. IIRC AMD Seattle had
> this layout. See max_zone_phys() for how we deal with this, basically
> extending ZONE_DMA to the whole range if RAM starts above 4GB. In that
> case, crashkernel reservation would fall in the range above 4GB.
> 
> BTW, we changed the max_zone_phys() logic with commit 791ab8b2e3db
> ("arm64: Ignore any DMA offsets in the max_zone_phys() calculation").

Okay, thanks for the correction. I'll dig into it after I've finished the original requirement.


Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7666b4955e45cb3..8c6666cbc7f2216 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -504,7 +504,7 @@  static void __init map_mem(pgd_t *pgdp)
 	phys_addr_t kernel_start = __pa_symbol(_stext);
 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
 	phys_addr_t start, end;
-	int flags = NO_EXEC_MAPPINGS;
+	int flags = NO_EXEC_MAPPINGS, eflags = 0;
 	u64 i;
 
 	/*
@@ -530,7 +530,7 @@  static void __init map_mem(pgd_t *pgdp)
 #ifdef CONFIG_KEXEC_CORE
 	if (crash_mem_map &&
 	    (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)))
-		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+		eflags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 	else if (crashk_res.end)
 		memblock_mark_nomap(crashk_res.start,
 				    resource_size(&crashk_res));
@@ -540,13 +540,31 @@  static void __init map_mem(pgd_t *pgdp)
 	for_each_mem_range(i, &start, &end) {
 		if (start >= end)
 			break;
+
+#ifdef CONFIG_KEXEC_CORE
+		if (eflags && (end >= SZ_4G)) {
+			/*
+			 * The memory block crosses the 4G boundary.
+			 * Forcibly use page-level mappings for memory under 4G.
+			 */
+			if (start < SZ_4G) {
+				__map_memblock(pgdp, start, SZ_4G - 1,
+					       pgprot_tagged(PAGE_KERNEL), flags | eflags);
+				start = SZ_4G;
+			}
+
+			/* Page-level mappings are not mandatory for memory above 4G */
+			eflags = 0;
+		}
+#endif
+
 		/*
 		 * The linear map must allow allocation tags reading/writing
 		 * if MTE is present. Otherwise, it has the same attributes as
 		 * PAGE_KERNEL.
 		 */
 		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
-			       flags);
+			       flags | eflags);
 	}
 
 	/*