[v2,04/17] arm64: Provide an 'upgrade to VHE' stub hypercall

Message ID 20210104135011.2063104-5-maz@kernel.org (mailing list archive)
State New, archived
Series arm64: Early CPU feature override, and an application to VHE

Commit Message

Marc Zyngier Jan. 4, 2021, 1:49 p.m. UTC
As we are about to change the way a VHE system boots, let's
provide the core helper, in the form of a stub hypercall that
enables VHE and replicates the full EL1 context at EL2, thanks
to EL1 and VHE-EL2 being extremely similar.

On exception return, the kernel carries on at EL2. Fancy!

Nothing calls this new hypercall yet, so no functional change.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/virt.h |  7 +++-
 arch/arm64/kernel/hyp-stub.S  | 70 ++++++++++++++++++++++++++++++++++-
 2 files changed, 74 insertions(+), 3 deletions(-)
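For context, stub hypercalls are issued from EL1 by loading the
function ID into x0 and trapping with "hvc #0". Nothing in this patch
issues HVC_VHE_RESTART yet; a minimal, hypothetical caller (the
__hyp_vhe_restart name is illustrative, not part of the series) could
look roughly like this:

	// Hypothetical EL1 caller: ask the stub to upgrade this CPU.
	// On success, the eret at the end of mutate_to_vhe resumes
	// execution right here, but at EL2. On failure (MMU on, or a
	// CPU without VHE) we come back at EL1 instead, so the caller
	// can compare CurrentEL against CurrentEL_EL2 to tell the two
	// cases apart.
	SYM_FUNC_START(__hyp_vhe_restart)
		mov	x0, #HVC_VHE_RESTART
		hvc	#0
		ret
	SYM_FUNC_END(__hyp_vhe_restart)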

Comments

Jing Zhang Jan. 5, 2021, 8:35 p.m. UTC | #1
On Mon, Jan 4, 2021 at 7:50 AM Marc Zyngier <maz@kernel.org> wrote:
>
> As we are about to change the way a VHE system boots, let's
> provide the core helper, in the form of a stub hypercall that
> enables VHE and replicates the full EL1 context at EL2, thanks
> to EL1 and VHE-EL2 being extremely similar.
>
> On exception return, the kernel carries on at EL2. Fancy!
>
> Nothing calls this new hypercall yet, so no functional change.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/virt.h |  7 +++-
>  arch/arm64/kernel/hyp-stub.S  | 70 ++++++++++++++++++++++++++++++++++-
>  2 files changed, 74 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
> index ee6a48df89d9..7379f35ae2c6 100644
> --- a/arch/arm64/include/asm/virt.h
> +++ b/arch/arm64/include/asm/virt.h
> @@ -35,8 +35,13 @@
>   */
>  #define HVC_RESET_VECTORS 2
>
> +/*
> + * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
> + */
> +#define HVC_VHE_RESTART        3
> +
>  /* Max number of HYP stub hypercalls */
> -#define HVC_STUB_HCALL_NR 3
> +#define HVC_STUB_HCALL_NR 4
>
>  /* Error returned when an invalid stub number is passed into x0 */
>  #define HVC_STUB_ERR   0xbadca11
> diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
> index 160f5881a0b7..6ffdc1f7778b 100644
> --- a/arch/arm64/kernel/hyp-stub.S
> +++ b/arch/arm64/kernel/hyp-stub.S
> @@ -8,9 +8,9 @@
>
>  #include <linux/init.h>
>  #include <linux/linkage.h>
> -#include <linux/irqchip/arm-gic-v3.h>
>
>  #include <asm/assembler.h>
> +#include <asm/el2_setup.h>
>  #include <asm/kvm_arm.h>
>  #include <asm/kvm_asm.h>
>  #include <asm/ptrace.h>
> @@ -47,10 +47,16 @@ SYM_CODE_END(__hyp_stub_vectors)
>
>  SYM_CODE_START_LOCAL(el1_sync)
>         cmp     x0, #HVC_SET_VECTORS
> -       b.ne    2f
> +       b.ne    1f
>         msr     vbar_el2, x1
>         b       9f
>
> +1:     cmp     x0, #HVC_VHE_RESTART
> +       b.ne    2f
> +       mov     x0, #HVC_SOFT_RESTART
> +       adr     x1, mutate_to_vhe
> +       // fall through...
Does "br x1" work here instead of falling through into
HVC_SOFT_RESTART? Just curious about the reason for falling-through.
> +
>  2:     cmp     x0, #HVC_SOFT_RESTART
>         b.ne    3f
>         mov     x0, x2
> @@ -70,6 +76,66 @@ SYM_CODE_START_LOCAL(el1_sync)
>         eret
>  SYM_CODE_END(el1_sync)
>
> +// nVHE? No way! Give me the real thing!
> +SYM_CODE_START_LOCAL(mutate_to_vhe)
> +       // Sanity check: MMU *must* be off
> +       mrs     x0, sctlr_el2
> +       tbnz    x0, #0, 1f
> +
> +       // Needs to be VHE capable, obviously
> +       mrs     x0, id_aa64mmfr1_el1
> +       ubfx    x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> +       cbz     x0, 1f
> +
> +       // Engage the VHE magic!
> +       mov_q   x0, HCR_HOST_VHE_FLAGS
> +       msr     hcr_el2, x0
> +       isb
> +
> +       // Doesn't do much on VHE, but still, worth a shot
> +       init_el2_state vhe
> +
> +       // Use the EL1 allocated stack, per-cpu offset
> +       mrs     x0, sp_el1
> +       mov     sp, x0
> +       mrs     x0, tpidr_el1
> +       msr     tpidr_el2, x0
> +
> +       // FP configuration, vectors
> +       mrs_s   x0, SYS_CPACR_EL12
> +       msr     cpacr_el1, x0
> +       mrs_s   x0, SYS_VBAR_EL12
> +       msr     vbar_el1, x0
> +
> +       // Transfer the MM state from EL1 to EL2
> +       mrs_s   x0, SYS_TCR_EL12
> +       msr     tcr_el1, x0
> +       mrs_s   x0, SYS_TTBR0_EL12
> +       msr     ttbr0_el1, x0
> +       mrs_s   x0, SYS_TTBR1_EL12
> +       msr     ttbr1_el1, x0
> +       mrs_s   x0, SYS_MAIR_EL12
> +       msr     mair_el1, x0
> +       isb
> +
> +       // Invalidate TLBs before enabling the MMU
> +       tlbi    vmalle1
> +       dsb     nsh
> +
> +       // Enable the EL2 S1 MMU, as set up from EL1
> +       mrs_s   x0, SYS_SCTLR_EL12
> +       set_sctlr_el1   x0
> +
> +       // Hack the exception return to stay at EL2
> +       mrs     x0, spsr_el1
> +       and     x0, x0, #~PSR_MODE_MASK
> +       mov     x1, #PSR_MODE_EL2h
> +       orr     x0, x0, x1
> +       msr     spsr_el1, x0
> +
> +1:     eret
> +SYM_CODE_END(mutate_to_vhe)
> +
>  .macro invalid_vector  label
>  SYM_CODE_START_LOCAL(\label)
>         b \label
> --
> 2.29.2
>
Marc Zyngier Jan. 5, 2021, 9:03 p.m. UTC | #2
On Tue, 05 Jan 2021 20:35:35 +0000,
Jing Zhang <jingzhangos@google.com> wrote:
> 
> On Mon, Jan 4, 2021 at 7:50 AM Marc Zyngier <maz@kernel.org> wrote:
> >
> > [...]
> > +1:     cmp     x0, #HVC_VHE_RESTART
> > +       b.ne    2f
> > +       mov     x0, #HVC_SOFT_RESTART
> > +       adr     x1, mutate_to_vhe
> > +       // fall through...
> Does "br x1" work here instead of falling through into
> HVC_SOFT_RESTART? Just curious about the reason for falling-through.

Absolutely.

This is a leftover from an initial (and pretty over-engineered)
approach where the caller used HVC_SOFT_RESTART with the address of
mutate_to_vhe passed as the target. It was an interesting idea, but
obtaining the PA of the function when the MMU is on ended up creating
a separate, harder to maintain path.

Cleanup time, I guess.

Thanks,

	M.
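
For reference, the cleanup discussed above would drop the
HVC_SOFT_RESTART masquerade and branch to the helper directly, along
these lines (a sketch of the suggestion, not the committed follow-up):

	1:	cmp	x0, #HVC_VHE_RESTART
		b.ne	2f
		// Branch straight to the upgrade code; no need to
		// pretend this is an HVC_SOFT_RESTART call any more.
		b	mutate_to_vhe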

Patch

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index ee6a48df89d9..7379f35ae2c6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -35,8 +35,13 @@ 
  */
 #define HVC_RESET_VECTORS 2
 
+/*
+ * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ */
+#define HVC_VHE_RESTART	3
+
 /* Max number of HYP stub hypercalls */
-#define HVC_STUB_HCALL_NR 3
+#define HVC_STUB_HCALL_NR 4
 
 /* Error returned when an invalid stub number is passed into x0 */
 #define HVC_STUB_ERR	0xbadca11
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 160f5881a0b7..6ffdc1f7778b 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -8,9 +8,9 @@ 
 
 #include <linux/init.h>
 #include <linux/linkage.h>
-#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/el2_setup.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/ptrace.h>
@@ -47,10 +47,16 @@  SYM_CODE_END(__hyp_stub_vectors)
 
 SYM_CODE_START_LOCAL(el1_sync)
 	cmp	x0, #HVC_SET_VECTORS
-	b.ne	2f
+	b.ne	1f
 	msr	vbar_el2, x1
 	b	9f
 
+1:	cmp	x0, #HVC_VHE_RESTART
+	b.ne	2f
+	mov	x0, #HVC_SOFT_RESTART
+	adr	x1, mutate_to_vhe
+	// fall through...
+
 2:	cmp	x0, #HVC_SOFT_RESTART
 	b.ne	3f
 	mov	x0, x2
@@ -70,6 +76,66 @@  SYM_CODE_START_LOCAL(el1_sync)
 	eret
 SYM_CODE_END(el1_sync)
 
+// nVHE? No way! Give me the real thing!
+SYM_CODE_START_LOCAL(mutate_to_vhe)
+	// Sanity check: MMU *must* be off
+	mrs	x0, sctlr_el2
+	tbnz	x0, #0, 1f
+
+	// Needs to be VHE capable, obviously
+	mrs	x0, id_aa64mmfr1_el1
+	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
+	cbz	x0, 1f
+
+	// Engage the VHE magic!
+	mov_q	x0, HCR_HOST_VHE_FLAGS
+	msr	hcr_el2, x0
+	isb
+
+	// Doesn't do much on VHE, but still, worth a shot
+	init_el2_state vhe
+
+	// Use the EL1 allocated stack, per-cpu offset
+	mrs	x0, sp_el1
+	mov	sp, x0
+	mrs	x0, tpidr_el1
+	msr	tpidr_el2, x0
+
+	// FP configuration, vectors
+	mrs_s	x0, SYS_CPACR_EL12
+	msr	cpacr_el1, x0
+	mrs_s	x0, SYS_VBAR_EL12
+	msr	vbar_el1, x0
+
+	// Transfer the MM state from EL1 to EL2
+	mrs_s	x0, SYS_TCR_EL12
+	msr	tcr_el1, x0
+	mrs_s	x0, SYS_TTBR0_EL12
+	msr	ttbr0_el1, x0
+	mrs_s	x0, SYS_TTBR1_EL12
+	msr	ttbr1_el1, x0
+	mrs_s	x0, SYS_MAIR_EL12
+	msr	mair_el1, x0
+	isb
+
+	// Invalidate TLBs before enabling the MMU
+	tlbi	vmalle1
+	dsb	nsh
+
+	// Enable the EL2 S1 MMU, as set up from EL1
+	mrs_s	x0, SYS_SCTLR_EL12
+	set_sctlr_el1	x0
+
+	// Hack the exception return to stay at EL2
+	mrs	x0, spsr_el1
+	and	x0, x0, #~PSR_MODE_MASK
+	mov	x1, #PSR_MODE_EL2h
+	orr	x0, x0, x1
+	msr	spsr_el1, x0
+
+1:	eret
+SYM_CODE_END(mutate_to_vhe)
+
 .macro invalid_vector	label
 SYM_CODE_START_LOCAL(\label)
 	b \label
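
Since mutate_to_vhe simply erets when its checks fail, a future caller
cannot rely on an error code in x0; the natural way to find out whether
the upgrade happened is to look at CurrentEL after the hypercall. A
sketch of such a check (hypothetical, not part of this patch;
CurrentEL_EL2 comes from asm/ptrace.h):

	// After issuing HVC_VHE_RESTART: did we come back at EL2?
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2	// (2 << 2), i.e. 0x8
	b.eq	.Lnow_at_el2		// upgrade succeeded
	// Still at EL1: the MMU was on, or the CPU is not VHE-capable.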