
[v17,10/15] arm64: kexec: use ld script for relocation function

Message ID 20210916231325.125533-11-pasha.tatashin@soleen.com (mailing list archive)
State New, archived
Series arm64: MMU enabled kexec relocation

Commit Message

Pasha Tatashin Sept. 16, 2021, 11:13 p.m. UTC
Currently, relocation code declares start and end variables
which are used to compute its size.

The better way to do this is to use ld script incited, and put relocation
function in its own section.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/sections.h   |  1 +
 arch/arm64/kernel/machine_kexec.c   | 16 ++++++----------
 arch/arm64/kernel/relocate_kernel.S | 15 ++-------------
 arch/arm64/kernel/vmlinux.lds.S     | 19 +++++++++++++++++++
 4 files changed, 28 insertions(+), 23 deletions(-)
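
In short, the linker script exports start and end symbols around the relocation code's output section, and the C side derives the size from them, so no hand-maintained size constant is needed in assembly. A minimal sketch of the consuming side (the wrapper function and destination pointer are illustrative, not taken from the patch):

	/* Sketch only: the symbol names follow the patch; the surrounding
	 * function is hypothetical. __relocate_new_kernel_start/_end are
	 * emitted by vmlinux.lds.S around the .kexec_relocate.text section
	 * and declared in asm/sections.h.
	 */
	#include <linux/string.h>	/* memcpy() */
	#include <asm/sections.h>

	static void copy_relocation_code(void *dst)
	{
		long reloc_size = __relocate_new_kernel_end -
				  __relocate_new_kernel_start;

		memcpy(dst, __relocate_new_kernel_start, reloc_size);
	}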

Comments

Will Deacon Sept. 29, 2021, 12:45 p.m. UTC | #1
On Thu, Sep 16, 2021 at 07:13:20PM -0400, Pasha Tatashin wrote:
> Currently, relocation code declares start and end variables
> which are used to compute its size.
> 
> The better way to do this is to use ld script incited, and put relocation
> function in its own section.

"incited"? I don't understand ...

> 
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> ---
>  arch/arm64/include/asm/sections.h   |  1 +
>  arch/arm64/kernel/machine_kexec.c   | 16 ++++++----------
>  arch/arm64/kernel/relocate_kernel.S | 15 ++-------------
>  arch/arm64/kernel/vmlinux.lds.S     | 19 +++++++++++++++++++
>  4 files changed, 28 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
> index e4ad9db53af1..152cb35bf9df 100644
> --- a/arch/arm64/include/asm/sections.h
> +++ b/arch/arm64/include/asm/sections.h
> @@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[];
>  extern char __irqentry_text_start[], __irqentry_text_end[];
>  extern char __mmuoff_data_start[], __mmuoff_data_end[];
>  extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
> +extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
>  
>  #endif /* __ASM_SECTIONS_H */
> diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
> index cf5d6f22a041..83da6045cd45 100644
> --- a/arch/arm64/kernel/machine_kexec.c
> +++ b/arch/arm64/kernel/machine_kexec.c
> @@ -21,14 +21,11 @@
>  #include <asm/mmu.h>
>  #include <asm/mmu_context.h>
>  #include <asm/page.h>
> +#include <asm/sections.h>
>  #include <asm/trans_pgd.h>
>  
>  #include "cpu-reset.h"
>  
> -/* Global variables for the arm64_relocate_new_kernel routine. */
> -extern const unsigned char arm64_relocate_new_kernel[];
> -extern const unsigned long arm64_relocate_new_kernel_size;
> -
>  /**
>   * kexec_image_info - For debugging output.
>   */
> @@ -163,6 +160,7 @@ static void *kexec_page_alloc(void *arg)
>  int machine_kexec_post_load(struct kimage *kimage)
>  {
>  	void *reloc_code = page_to_virt(kimage->control_code_page);
> +	long reloc_size;
>  	struct trans_pgd_info info = {
>  		.trans_alloc_page	= kexec_page_alloc,
>  		.trans_alloc_arg	= kimage,
> @@ -183,17 +181,15 @@ int machine_kexec_post_load(struct kimage *kimage)
>  			return rc;
>  	}
>  
> -	memcpy(reloc_code, arm64_relocate_new_kernel,
> -	       arm64_relocate_new_kernel_size);
> +	reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
> +	memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
>  	kimage->arch.kern_reloc = __pa(reloc_code);
>  
>  	/* Flush the reloc_code in preparation for its execution. */
>  	dcache_clean_inval_poc((unsigned long)reloc_code,
> -			       (unsigned long)reloc_code +
> -			       arm64_relocate_new_kernel_size);
> +			       (unsigned long)reloc_code +  reloc_size);

Extra whitespace.

>  	icache_inval_pou((uintptr_t)reloc_code,
> -			 (uintptr_t)reloc_code +
> -			 arm64_relocate_new_kernel_size);
> +			 (uintptr_t)reloc_code + reloc_size);
>  	kexec_list_flush(kimage);
>  	kexec_image_info(kimage);
>  
> diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
> index b4fb97312a80..9d2400855ee4 100644
> --- a/arch/arm64/kernel/relocate_kernel.S
> +++ b/arch/arm64/kernel/relocate_kernel.S
> @@ -15,6 +15,7 @@
>  #include <asm/sysreg.h>
>  #include <asm/virt.h>
>  
> +.pushsection    ".kexec_relocate.text", "ax"

Just use .section if you're putting the entire file in there?

>  /*
>   * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
>   *
> @@ -77,16 +78,4 @@ SYM_CODE_START(arm64_relocate_new_kernel)
>  	mov	x3, xzr
>  	br	x4				/* Jumps from el1 */
>  SYM_CODE_END(arm64_relocate_new_kernel)
> -
> -.align 3	/* To keep the 64-bit values below naturally aligned. */
> -
> -.Lcopy_end:
> -.org	KEXEC_CONTROL_PAGE_SIZE
> -
> -/*
> - * arm64_relocate_new_kernel_size - Number of bytes to copy to the
> - * control_code_page.
> - */
> -.globl arm64_relocate_new_kernel_size
> -arm64_relocate_new_kernel_size:
> -	.quad	.Lcopy_end - arm64_relocate_new_kernel
> +.popsection
> diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
> index f6b1a88245db..ab457b609e69 100644
> --- a/arch/arm64/kernel/vmlinux.lds.S
> +++ b/arch/arm64/kernel/vmlinux.lds.S
> @@ -63,6 +63,7 @@
>  #include <asm-generic/vmlinux.lds.h>
>  #include <asm/cache.h>
>  #include <asm/kernel-pgtable.h>
> +#include <asm/kexec.h>
>  #include <asm/memory.h>
>  #include <asm/page.h>
>  
> @@ -100,6 +101,16 @@ jiffies = jiffies_64;
>  #define HIBERNATE_TEXT
>  #endif
>  
> +#ifdef CONFIG_KEXEC_CORE
> +#define KEXEC_TEXT					\
> +	. = ALIGN(SZ_4K);				\
> +	__relocate_new_kernel_start = .;		\
> +	*(.kexec_relocate.text)				\
> +	__relocate_new_kernel_end = .;
> +#else
> +#define KEXEC_TEXT
> +#endif
> +
>  #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>  #define TRAMP_TEXT					\
>  	. = ALIGN(PAGE_SIZE);				\
> @@ -160,6 +171,7 @@ SECTIONS
>  			HYPERVISOR_TEXT
>  			IDMAP_TEXT
>  			HIBERNATE_TEXT
> +			KEXEC_TEXT
>  			TRAMP_TEXT
>  			*(.fixup)
>  			*(.gnu.warning)
> @@ -348,3 +360,10 @@ ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
>  ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
>         "TRAMP_SWAPPER_OFFSET is wrong!")
>  #endif
> +
> +#ifdef CONFIG_KEXEC_CORE
> +/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
> +ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
> +	<= SZ_4K, "kexec relocation code is too big or misaligned")
> +ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is brokern")

Typo: "brokern",

Will
Pasha Tatashin Sept. 30, 2021, 3:57 a.m. UTC | #2
On Wed, Sep 29, 2021 at 8:45 AM Will Deacon <will@kernel.org> wrote:
>
> On Thu, Sep 16, 2021 at 07:13:20PM -0400, Pasha Tatashin wrote:
> > Currently, relocation code declares start and end variables
> > which are used to compute its size.
> >
> > The better way to do this is to use ld script incited, and put relocation
> > function in its own section.
>
> "incited"? I don't understand ...

I will correct it:
s/incited//


> > +#ifdef CONFIG_KEXEC_CORE
> > +/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
> > +ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
> > +     <= SZ_4K, "kexec relocation code is too big or misaligned")
> > +ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is brokern")
>
> Typo: "brokern",

Will correct it.

Thanks,
Pasha
Pasha Tatashin Sept. 30, 2021, 4:08 a.m. UTC | #3
Sorry, missed two comments:

> >       /* Flush the reloc_code in preparation for its execution. */
> >       dcache_clean_inval_poc((unsigned long)reloc_code,
> > -                            (unsigned long)reloc_code +
> > -                            arm64_relocate_new_kernel_size);
> > +                            (unsigned long)reloc_code +  reloc_size);
>
> Extra whitespace.

Yeap, extra whitespace after '+', will fix it :)

>
> >       icache_inval_pou((uintptr_t)reloc_code,
> > -                      (uintptr_t)reloc_code +
> > -                      arm64_relocate_new_kernel_size);
> > +                      (uintptr_t)reloc_code + reloc_size);
> >       kexec_list_flush(kimage);
> >       kexec_image_info(kimage);
> >
> > diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
> > index b4fb97312a80..9d2400855ee4 100644
> > --- a/arch/arm64/kernel/relocate_kernel.S
> > +++ b/arch/arm64/kernel/relocate_kernel.S
> > @@ -15,6 +15,7 @@
> >  #include <asm/sysreg.h>
> >  #include <asm/virt.h>
> >
> > +.pushsection    ".kexec_relocate.text", "ax"
>
> Just use .section if you're putting the entire file in there?

Good point, I will change it to .section.

Patch

diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index e4ad9db53af1..152cb35bf9df 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -21,5 +21,6 @@  extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
 
 #endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index cf5d6f22a041..83da6045cd45 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -21,14 +21,11 @@ 
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
+#include <asm/sections.h>
 #include <asm/trans_pgd.h>
 
 #include "cpu-reset.h"
 
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
 /**
  * kexec_image_info - For debugging output.
  */
@@ -163,6 +160,7 @@  static void *kexec_page_alloc(void *arg)
 int machine_kexec_post_load(struct kimage *kimage)
 {
 	void *reloc_code = page_to_virt(kimage->control_code_page);
+	long reloc_size;
 	struct trans_pgd_info info = {
 		.trans_alloc_page	= kexec_page_alloc,
 		.trans_alloc_arg	= kimage,
@@ -183,17 +181,15 @@  int machine_kexec_post_load(struct kimage *kimage)
 			return rc;
 	}
 
-	memcpy(reloc_code, arm64_relocate_new_kernel,
-	       arm64_relocate_new_kernel_size);
+	reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
+	memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
 	kimage->arch.kern_reloc = __pa(reloc_code);
 
 	/* Flush the reloc_code in preparation for its execution. */
 	dcache_clean_inval_poc((unsigned long)reloc_code,
-			       (unsigned long)reloc_code +
-			       arm64_relocate_new_kernel_size);
+			       (unsigned long)reloc_code +  reloc_size);
 	icache_inval_pou((uintptr_t)reloc_code,
-			 (uintptr_t)reloc_code +
-			 arm64_relocate_new_kernel_size);
+			 (uintptr_t)reloc_code + reloc_size);
 	kexec_list_flush(kimage);
 	kexec_image_info(kimage);
 
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index b4fb97312a80..9d2400855ee4 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -15,6 +15,7 @@ 
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
+.pushsection    ".kexec_relocate.text", "ax"
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
  *
@@ -77,16 +78,4 @@  SYM_CODE_START(arm64_relocate_new_kernel)
 	mov	x3, xzr
 	br	x4				/* Jumps from el1 */
 SYM_CODE_END(arm64_relocate_new_kernel)
-
-.align 3	/* To keep the 64-bit values below naturally aligned. */
-
-.Lcopy_end:
-.org	KEXEC_CONTROL_PAGE_SIZE
-
-/*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
- * control_code_page.
- */
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
-	.quad	.Lcopy_end - arm64_relocate_new_kernel
+.popsection
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f6b1a88245db..ab457b609e69 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@ 
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/kexec.h>
 #include <asm/memory.h>
 #include <asm/page.h>
 
@@ -100,6 +101,16 @@  jiffies = jiffies_64;
 #define HIBERNATE_TEXT
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
+#define KEXEC_TEXT					\
+	. = ALIGN(SZ_4K);				\
+	__relocate_new_kernel_start = .;		\
+	*(.kexec_relocate.text)				\
+	__relocate_new_kernel_end = .;
+#else
+#define KEXEC_TEXT
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define TRAMP_TEXT					\
 	. = ALIGN(PAGE_SIZE);				\
@@ -160,6 +171,7 @@  SECTIONS
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
 			HIBERNATE_TEXT
+			KEXEC_TEXT
 			TRAMP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
@@ -348,3 +360,10 @@  ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
 ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
        "TRAMP_SWAPPER_OFFSET is wrong!")
 #endif
+
+#ifdef CONFIG_KEXEC_CORE
+/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
+ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
+	<= SZ_4K, "kexec relocation code is too big or misaligned")
+ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is brokern")
+#endif