[v9,15/18] arm64: kexec: kexec EL2 vectors

Message ID: 20200326032420.27220-16-pasha.tatashin@soleen.com
State: New, archived
Series: arm64: MMU enabled kexec relocation

Commit Message

Pasha Tatashin March 26, 2020, 3:24 a.m. UTC
If we have a EL2 mode without VHE, the EL2 vectors are needed in order
to switch to EL2 and jump to new world with hyperivsor privileges.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/kexec.h      |  5 +++++
 arch/arm64/kernel/asm-offsets.c     |  1 +
 arch/arm64/kernel/machine_kexec.c   |  5 +++++
 arch/arm64/kernel/relocate_kernel.S | 35 +++++++++++++++++++++++++++++
 4 files changed, 46 insertions(+)

Comments

Marc Zyngier April 29, 2020, 5:35 p.m. UTC | #1
On 2020-03-26 03:24, Pavel Tatashin wrote:
> If we have a EL2 mode without VHE, the EL2 vectors are needed in order
> to switch to EL2 and jump to new world with hyperivsor privileges.
> 
> Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
> ---
>  arch/arm64/include/asm/kexec.h      |  5 +++++
>  arch/arm64/kernel/asm-offsets.c     |  1 +
>  arch/arm64/kernel/machine_kexec.c   |  5 +++++
>  arch/arm64/kernel/relocate_kernel.S | 35 +++++++++++++++++++++++++++++
>  4 files changed, 46 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
> index d944c2e289b2..0f758fd51518 100644
> --- a/arch/arm64/include/asm/kexec.h
> +++ b/arch/arm64/include/asm/kexec.h
> @@ -95,6 +95,7 @@ static inline void crash_post_resume(void) {}
>  extern const unsigned long kexec_relocate_code_size;
>  extern const unsigned char kexec_relocate_code_start[];
>  extern const unsigned long kexec_kern_reloc_offset;
> +extern const unsigned long kexec_el2_vectors_offset;
>  #endif
> 
>  /*
> @@ -104,6 +105,9 @@ extern const unsigned long kexec_kern_reloc_offset;
>   *		kernel, or purgatory entry address).
>   * kern_arg0	first argument to kernel is its dtb address. The other
>   *		arguments are currently unused, and must be set to 0
> + * el2_vector	If present means that relocation routine will go to EL1
> + *		from EL2 to do the copy, and then back to EL2 to do the jump
> + *		to new world.
>   */
>  struct kern_reloc_arg {
>  	phys_addr_t head;
> @@ -112,6 +116,7 @@ struct kern_reloc_arg {
>  	phys_addr_t kern_arg1;
>  	phys_addr_t kern_arg2;
>  	phys_addr_t kern_arg3;
> +	phys_addr_t el2_vector;
>  };
> 
>  #define ARCH_HAS_KIMAGE_ARCH
> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
> index 448230684749..ff974b648347 100644
> --- a/arch/arm64/kernel/asm-offsets.c
> +++ b/arch/arm64/kernel/asm-offsets.c
> @@ -136,6 +136,7 @@ int main(void)
>    DEFINE(KEXEC_KRELOC_KERN_ARG1,	offsetof(struct kern_reloc_arg, kern_arg1));
>    DEFINE(KEXEC_KRELOC_KERN_ARG2,	offsetof(struct kern_reloc_arg, kern_arg2));
>    DEFINE(KEXEC_KRELOC_KERN_ARG3,	offsetof(struct kern_reloc_arg, kern_arg3));
> +  DEFINE(KEXEC_KRELOC_EL2_VECTOR,	offsetof(struct kern_reloc_arg, el2_vector));
>  #endif
>    return 0;
>  }
> diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
> index ab571fca9bd1..bd398def7627 100644
> --- a/arch/arm64/kernel/machine_kexec.c
> +++ b/arch/arm64/kernel/machine_kexec.c
> @@ -84,6 +84,11 @@ int machine_kexec_post_load(struct kimage *kimage)
>  	kern_reloc_arg->head = kimage->head;
>  	kern_reloc_arg->entry_addr = kimage->start;
>  	kern_reloc_arg->kern_arg0 = kimage->arch.dtb_mem;
> +	/* Setup vector table only when EL2 is available, but no VHE */
> +	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
> +		kern_reloc_arg->el2_vector = __pa(reloc_code)
> +						+ kexec_el2_vectors_offset;
> +	}
>  	kexec_image_info(kimage);
> 
>  	return 0;
> diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
> index aa9f2b2cd77c..6fd2fc0ef373 100644
> --- a/arch/arm64/kernel/relocate_kernel.S
> +++ b/arch/arm64/kernel/relocate_kernel.S
> @@ -89,6 +89,38 @@ ENTRY(arm64_relocate_new_kernel)
>  .ltorg
>  END(arm64_relocate_new_kernel)
> 
> +.macro el1_sync_64
> +	br	x4			/* Jump to new world from el2 */
> +	.fill 31, 4, 0			/* Set other 31 instr to zeroes */
> +.endm

The common idiom to write this is to align the beginning of the
macro, and not to bother about what follows:

.macro whatever
         .align 7
         br      x4
.endm

Specially given that 0 is an undefined instruction, and I really hate to
see those in the actual text. On the contrary, .align generates NOPs.
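Concretely, applying that idiom to both macros would look something like
this (a sketch of the suggestion only, not code taken from a later
revision of the series):

.macro el1_sync_64
	.align 7			/* start each slot on a 128-byte boundary */
	br	x4			/* Jump to new world from el2 */
.endm

.macro invalid_vector label
	.align 7
\label:
	b	\label			/* Park on any unexpected exception */
.endm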

> +
> +.macro invalid_vector label
> +\label:
> +	b \label
> +	.fill 31, 4, 0			/* Set other 31 instr to zeroes */
> +.endm
> +
> +/* el2 vectors - switch el2 here while we restore the memory image. */
> +	.align 11
> +ENTRY(kexec_el2_vectors)

Please see commit 617a2f392c92 ("arm64: kvm: Annotate assembly using
modern annoations"), and follow the same pattern.

> +	invalid_vector el2_sync_invalid_sp0	/* Synchronous EL2t */
> +	invalid_vector el2_irq_invalid_sp0	/* IRQ EL2t */
> +	invalid_vector el2_fiq_invalid_sp0	/* FIQ EL2t */
> +	invalid_vector el2_error_invalid_sp0	/* Error EL2t */
> +	invalid_vector el2_sync_invalid_spx	/* Synchronous EL2h */
> +	invalid_vector el2_irq_invalid_spx	/* IRQ EL2h */
> +	invalid_vector el2_fiq_invalid_spx	/* FIQ EL2h */
> +	invalid_vector el2_error_invalid_spx	/* Error EL2h */
> +		el1_sync_64			/* Synchronous 64-bit EL1 */
> +	invalid_vector el1_irq_invalid_64	/* IRQ 64-bit EL1 */
> +	invalid_vector el1_fiq_invalid_64	/* FIQ 64-bit EL1 */
> +	invalid_vector el1_error_invalid_64	/* Error 64-bit EL1 */
> +	invalid_vector el1_sync_invalid_32	/* Synchronous 32-bit EL1 */
> +	invalid_vector el1_irq_invalid_32	/* IRQ 32-bit EL1 */
> +	invalid_vector el1_fiq_invalid_32	/* FIQ 32-bit EL1 */
> +	invalid_vector el1_error_invalid_32	/* Error 32-bit EL1 */
> +END(kexec_el2_vectors)

Please write the vectors in 4 groups of 4, as this makes it a lot easier
to follow what is what.
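
Taken together with the annotation change above, the table might then
read as follows (again only a sketch, reusing the patch's label names
and the SYM_CODE_START/SYM_CODE_END macros from the commit mentioned
earlier):

	.align 11
SYM_CODE_START(kexec_el2_vectors)
	invalid_vector el2_sync_invalid_sp0	/* Synchronous EL2t */
	invalid_vector el2_irq_invalid_sp0	/* IRQ EL2t */
	invalid_vector el2_fiq_invalid_sp0	/* FIQ EL2t */
	invalid_vector el2_error_invalid_sp0	/* Error EL2t */

	invalid_vector el2_sync_invalid_spx	/* Synchronous EL2h */
	invalid_vector el2_irq_invalid_spx	/* IRQ EL2h */
	invalid_vector el2_fiq_invalid_spx	/* FIQ EL2h */
	invalid_vector el2_error_invalid_spx	/* Error EL2h */

	el1_sync_64				/* Synchronous 64-bit EL1 */
	invalid_vector el1_irq_invalid_64	/* IRQ 64-bit EL1 */
	invalid_vector el1_fiq_invalid_64	/* FIQ 64-bit EL1 */
	invalid_vector el1_error_invalid_64	/* Error 64-bit EL1 */

	invalid_vector el1_sync_invalid_32	/* Synchronous 32-bit EL1 */
	invalid_vector el1_irq_invalid_32	/* IRQ 32-bit EL1 */
	invalid_vector el1_fiq_invalid_32	/* FIQ 32-bit EL1 */
	invalid_vector el1_error_invalid_32	/* Error 32-bit EL1 */
SYM_CODE_END(kexec_el2_vectors)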

> +
>  .Lkexec_relocate_code_end:
>  .org	KEXEC_CONTROL_PAGE_SIZE
>  .align 3	/* To keep the 64-bit values below naturally aligned. */
> @@ -102,3 +134,6 @@ kexec_relocate_code_size:
>  .globl kexec_kern_reloc_offset
>  kexec_kern_reloc_offset:
>  	.quad	arm64_relocate_new_kernel - kexec_relocate_code_start
> +.globl kexec_el2_vectors_offset
> +kexec_el2_vectors_offset:
> +	.quad	kexec_el2_vectors - kexec_relocate_code_start

Thanks,

         M.
James Morse May 7, 2020, 4:21 p.m. UTC | #2
Hi Pavel,

What happened to the subject?
(it really needs a verb to make any sense)

On 26/03/2020 03:24, Pavel Tatashin wrote:
> If we have a EL2 mode without VHE, the EL2 vectors are needed in order
> to switch to EL2 and jump to new world with hyperivsor privileges.

Yes, but the hyp-stub has an API to let you do this... but you need your own version.

Could you explain why in the commit message?

(spelling: hyperivsor)


> diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
> index ab571fca9bd1..bd398def7627 100644
> --- a/arch/arm64/kernel/machine_kexec.c
> +++ b/arch/arm64/kernel/machine_kexec.c
> @@ -84,6 +84,11 @@ int machine_kexec_post_load(struct kimage *kimage)
>  	kern_reloc_arg->head = kimage->head;
>  	kern_reloc_arg->entry_addr = kimage->start;
>  	kern_reloc_arg->kern_arg0 = kimage->arch.dtb_mem;
> +	/* Setup vector table only when EL2 is available, but no VHE */
> +	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
> +		kern_reloc_arg->el2_vector = __pa(reloc_code)
> +						+ kexec_el2_vectors_offset;
> +	}

Why does the asm relocation code need to know where the vector is? It must access it via HVC.
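
(For reference, the stub convention in question: the kernel's existing
__hyp_set_vectors() helper in arch/arm64/kernel/hyp-stub.S installs a
new EL2 vector table via an HVC, roughly as below. This is an
illustrative sketch from memory, so treat the exact annotations as
approximate:)

SYM_FUNC_START(__hyp_set_vectors)
	mov	x1, x0			/* x1 = physical base of the new table */
	mov	x0, #HVC_SET_VECTORS	/* function ID handled by the EL2 stub */
	hvc	#0			/* stub writes x1 to VBAR_EL2 */
	ret
SYM_FUNC_END(__hyp_set_vectors)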

Thanks,

James
Pasha Tatashin Jan. 25, 2021, 7:07 p.m. UTC | #3
> > +.macro el1_sync_64
> > +     br      x4                      /* Jump to new world from el2 */
> > +     .fill 31, 4, 0                  /* Set other 31 instr to zeroes */
> > +.endm
>
> The common idiom to write this is to align the beginning of the
> macro, and not to bother about what follows:
>
> .macro whatever
>          .align 7
>          br      x4
> .endm
>
> Specially given that 0 is an undefined instruction, and I really hate to
> see those in the actual text. On the contrary, .align generates NOPs.

Fixed that.

>
> > +
> > +.macro invalid_vector label
> > +\label:
> > +     b \label
> > +     .fill 31, 4, 0                  /* Set other 31 instr to zeroes */
> > +.endm
> > +
> > +/* el2 vectors - switch el2 here while we restore the memory image. */
> > +     .align 11
> > +ENTRY(kexec_el2_vectors)
>
> Please see commit 617a2f392c92 ("arm64: kvm: Annotate assembly using
> modern annoations"), and follow the same pattern.

Fixed that as well.

Thank you,
Pasha

Patch

diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index d944c2e289b2..0f758fd51518 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -95,6 +95,7 @@  static inline void crash_post_resume(void) {}
 extern const unsigned long kexec_relocate_code_size;
 extern const unsigned char kexec_relocate_code_start[];
 extern const unsigned long kexec_kern_reloc_offset;
+extern const unsigned long kexec_el2_vectors_offset;
 #endif
 
 /*
@@ -104,6 +105,9 @@  extern const unsigned long kexec_kern_reloc_offset;
  *		kernel, or purgatory entry address).
  * kern_arg0	first argument to kernel is its dtb address. The other
  *		arguments are currently unused, and must be set to 0
+ * el2_vector	If present means that relocation routine will go to EL1
+ *		from EL2 to do the copy, and then back to EL2 to do the jump
+ *		to new world.
  */
 struct kern_reloc_arg {
 	phys_addr_t head;
@@ -112,6 +116,7 @@  struct kern_reloc_arg {
 	phys_addr_t kern_arg1;
 	phys_addr_t kern_arg2;
 	phys_addr_t kern_arg3;
+	phys_addr_t el2_vector;
 };
 
 #define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 448230684749..ff974b648347 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -136,6 +136,7 @@  int main(void)
   DEFINE(KEXEC_KRELOC_KERN_ARG1,	offsetof(struct kern_reloc_arg, kern_arg1));
   DEFINE(KEXEC_KRELOC_KERN_ARG2,	offsetof(struct kern_reloc_arg, kern_arg2));
   DEFINE(KEXEC_KRELOC_KERN_ARG3,	offsetof(struct kern_reloc_arg, kern_arg3));
+  DEFINE(KEXEC_KRELOC_EL2_VECTOR,	offsetof(struct kern_reloc_arg, el2_vector));
 #endif
   return 0;
 }
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index ab571fca9bd1..bd398def7627 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -84,6 +84,11 @@  int machine_kexec_post_load(struct kimage *kimage)
 	kern_reloc_arg->head = kimage->head;
 	kern_reloc_arg->entry_addr = kimage->start;
 	kern_reloc_arg->kern_arg0 = kimage->arch.dtb_mem;
+	/* Setup vector table only when EL2 is available, but no VHE */
+	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+		kern_reloc_arg->el2_vector = __pa(reloc_code)
+						+ kexec_el2_vectors_offset;
+	}
 	kexec_image_info(kimage);
 
 	return 0;
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index aa9f2b2cd77c..6fd2fc0ef373 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -89,6 +89,38 @@  ENTRY(arm64_relocate_new_kernel)
 .ltorg
 END(arm64_relocate_new_kernel)
 
+.macro el1_sync_64
+	br	x4			/* Jump to new world from el2 */
+	.fill 31, 4, 0			/* Set other 31 instr to zeroes */
+.endm
+
+.macro invalid_vector label
+\label:
+	b \label
+	.fill 31, 4, 0			/* Set other 31 instr to zeroes */
+.endm
+
+/* el2 vectors - switch el2 here while we restore the memory image. */
+	.align 11
+ENTRY(kexec_el2_vectors)
+	invalid_vector el2_sync_invalid_sp0	/* Synchronous EL2t */
+	invalid_vector el2_irq_invalid_sp0	/* IRQ EL2t */
+	invalid_vector el2_fiq_invalid_sp0	/* FIQ EL2t */
+	invalid_vector el2_error_invalid_sp0	/* Error EL2t */
+	invalid_vector el2_sync_invalid_spx	/* Synchronous EL2h */
+	invalid_vector el2_irq_invalid_spx	/* IRQ EL2h */
+	invalid_vector el2_fiq_invalid_spx	/* FIQ EL2h */
+	invalid_vector el2_error_invalid_spx	/* Error EL2h */
+		el1_sync_64			/* Synchronous 64-bit EL1 */
+	invalid_vector el1_irq_invalid_64	/* IRQ 64-bit EL1 */
+	invalid_vector el1_fiq_invalid_64	/* FIQ 64-bit EL1 */
+	invalid_vector el1_error_invalid_64	/* Error 64-bit EL1 */
+	invalid_vector el1_sync_invalid_32	/* Synchronous 32-bit EL1 */
+	invalid_vector el1_irq_invalid_32	/* IRQ 32-bit EL1 */
+	invalid_vector el1_fiq_invalid_32	/* FIQ 32-bit EL1 */
+	invalid_vector el1_error_invalid_32	/* Error 32-bit EL1 */
+END(kexec_el2_vectors)
+
 .Lkexec_relocate_code_end:
 .org	KEXEC_CONTROL_PAGE_SIZE
 .align 3	/* To keep the 64-bit values below naturally aligned. */
@@ -102,3 +134,6 @@  kexec_relocate_code_size:
 .globl kexec_kern_reloc_offset
 kexec_kern_reloc_offset:
 	.quad	arm64_relocate_new_kernel - kexec_relocate_code_start
+.globl kexec_el2_vectors_offset
+kexec_el2_vectors_offset:
+	.quad	kexec_el2_vectors - kexec_relocate_code_start