[v14,07/11] arm64: kdump: introduce some macros for crash kernel reservation

Message ID: 20210130071025.65258-8-chenzhou10@huawei.com (mailing list archive)
State: New, archived
Series: support reserving crashkernel above 4G on arm64 kdump

Commit Message

chenzhou Jan. 30, 2021, 7:10 a.m. UTC
Introduce the macro CRASH_ALIGN for alignment, CRASH_ADDR_LOW_MAX for the
upper bound of low crash memory, and CRASH_ADDR_HIGH_MAX for the upper
bound of high crash memory, and use these macros in place of the
open-coded values.

Besides, to keep consistent with x86, use CRASH_ALIGN as the lower bound
of the crash kernel reservation.

Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Tested-by: John Donnelly <John.p.donnelly@oracle.com>
---
 arch/arm64/include/asm/kexec.h | 6 ++++++
 arch/arm64/mm/init.c           | 6 +++---
 2 files changed, 9 insertions(+), 3 deletions(-)

Comments

Nicolas Saenz Julienne Feb. 4, 2021, 4:20 p.m. UTC | #1
Hi Chen,

On Sat, 2021-01-30 at 15:10 +0800, Chen Zhou wrote:
> Introduce the macro CRASH_ALIGN for alignment, CRASH_ADDR_LOW_MAX for the
> upper bound of low crash memory, and CRASH_ADDR_HIGH_MAX for the upper
> bound of high crash memory, and use these macros in place of the
> open-coded values.
> 
> Besides, to keep consistent with x86, use CRASH_ALIGN as the lower bound
> of the crash kernel reservation.
> 
> Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
> Tested-by: John Donnelly <John.p.donnelly@oracle.com>
> ---
>  arch/arm64/include/asm/kexec.h | 6 ++++++
>  arch/arm64/mm/init.c           | 6 +++---
>  2 files changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
> index d24b527e8c00..3f6ecae0bc68 100644
> --- a/arch/arm64/include/asm/kexec.h
> +++ b/arch/arm64/include/asm/kexec.h
> @@ -25,6 +25,12 @@
>  
>  #define KEXEC_ARCH KEXEC_ARCH_AARCH64
>  
> +/* 2M alignment for crash kernel regions */
> +#define CRASH_ALIGN	SZ_2M
> +
> +#define CRASH_ADDR_LOW_MAX	arm64_dma_phys_limit

I wonder if you could use 'ARCH_LOW_ADDRESS_LIMIT', instead of creating a new
define.
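
A minimal sketch of what this suggestion could look like (an assumption,
not code from this series; arm64 appears to define ARCH_LOW_ADDRESS_LIMIT
as arm64_dma_phys_limit, which is what would make the two interchangeable):

	/* sketch: reuse the generic limit rather than aliasing the
	 * arm64-internal variable directly
	 */
	#define CRASH_ADDR_LOW_MAX	ARCH_LOW_ADDRESS_LIMIT
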

> +#define CRASH_ADDR_HIGH_MAX	MEMBLOCK_ALLOC_ACCESSIBLE
> +
>  #ifndef __ASSEMBLY__
>  
>  /**
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 709d98fea90c..912f64f505f7 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -84,8 +84,8 @@ static void __init reserve_crashkernel(void)
>  
>  	if (crash_base == 0) {
>  		/* Current arm64 boot protocol requires 2MB alignment */
> -		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
> -				crash_size, SZ_2M);
> +		crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_LOW_MAX,
> +				crash_size, CRASH_ALIGN);

Actually we could get rid of CRASH_ADDR_LOW_MAX altogether if we used
memblock_alloc_low() here (modulo the slight refactoring needed to accommodate
it).

Regards,
Nicolas
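
A rough sketch of the refactoring hinted at above (an assumption, not code
from this series): unlike memblock_find_in_range(), memblock_alloc_low()
both finds and reserves memory below ARCH_LOW_ADDRESS_LIMIT and returns a
virtual address, so reserve_crashkernel() would derive the physical base
from the returned pointer and drop its explicit memblock_reserve():

	/* sketch only: allocate and reserve low memory in one step */
	void *ptr = memblock_alloc_low(crash_size, CRASH_ALIGN);

	if (!ptr) {
		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
			crash_size);
		return;
	}
	crash_base = __pa(ptr);
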
Nicolas Saenz Julienne Feb. 4, 2021, 4:27 p.m. UTC | #2
On Thu, 2021-02-04 at 17:20 +0100, Nicolas Saenz Julienne wrote:
> Hi Chen,
> 
> On Sat, 2021-01-30 at 15:10 +0800, Chen Zhou wrote:
[...]
> > +#define CRASH_ADDR_LOW_MAX	arm64_dma_phys_limit
> 
> I wonder if you could use 'ARCH_LOW_ADDRESS_LIMIT', instead of creating a new
> define.
[...]
> Actually we could get rid of CRASH_ADDR_LOW_MAX altogether if we used
> memblock_alloc_low() here (modulo the slight refactoring needed to accommodate
> it).
> 
Forget about these comments, I now see that you're deleting this whole function
in the next patch and defaulting to a generic implementation. Sorry for the
noise.

Regards,
Nicolas

Patch

diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index d24b527e8c00..3f6ecae0bc68 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -25,6 +25,12 @@ 
 
 #define KEXEC_ARCH KEXEC_ARCH_AARCH64
 
+/* 2M alignment for crash kernel regions */
+#define CRASH_ALIGN	SZ_2M
+
+#define CRASH_ADDR_LOW_MAX	arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX	MEMBLOCK_ALLOC_ACCESSIBLE
+
 #ifndef __ASSEMBLY__
 
 /**
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 709d98fea90c..912f64f505f7 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -84,8 +84,8 @@ static void __init reserve_crashkernel(void)
 
 	if (crash_base == 0) {
 		/* Current arm64 boot protocol requires 2MB alignment */
-		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
-				crash_size, SZ_2M);
+		crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_LOW_MAX,
+				crash_size, CRASH_ALIGN);
 		if (crash_base == 0) {
 			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
 				crash_size);
@@ -103,7 +103,7 @@  static void __init reserve_crashkernel(void)
 			return;
 		}
 
-		if (!IS_ALIGNED(crash_base, SZ_2M)) {
+		if (!IS_ALIGNED(crash_base, CRASH_ALIGN)) {
 			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
 			return;
 		}