[2/3] KVM: arm64: Generate final CTR_EL0 value when running in Protected mode

Message ID 20210322164828.800662-3-maz@kernel.org (mailing list archive)
State New, archived
Series KVM: arm64: Proposed host stage-2 improvements

Commit Message

Marc Zyngier March 22, 2021, 4:48 p.m. UTC
In protected mode, late CPUs are not allowed to boot (enforced by
the PSCI relay). We can thus specialise the read_ctr macro to
always return a pre-computed, sanitised value.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/assembler.h | 9 +++++++++
 arch/arm64/kernel/image-vars.h     | 1 +
 arch/arm64/kvm/va_layout.c         | 7 +++++++
 3 files changed, 17 insertions(+)

Comments

Quentin Perret March 22, 2021, 5:40 p.m. UTC | #1
Hey Marc,

On Monday 22 Mar 2021 at 16:48:27 (+0000), Marc Zyngier wrote:
> In protected mode, late CPUs are not allowed to boot (enforced by
> the PSCI relay). We can thus specialise the read_ctr macro to
> always return a pre-computed, sanitised value.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/assembler.h | 9 +++++++++
>  arch/arm64/kernel/image-vars.h     | 1 +
>  arch/arm64/kvm/va_layout.c         | 7 +++++++
>  3 files changed, 17 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index fb651c1f26e9..1a4cee7eb3c9 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -270,12 +270,21 @@ alternative_endif
>   * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
>   */
>  	.macro	read_ctr, reg
> +#ifndef __KVM_NVHE_HYPERVISOR__
>  alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
>  	mrs	\reg, ctr_el0			// read CTR
>  	nop
>  alternative_else
>  	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
>  alternative_endif
> +#else
> +alternative_cb kvm_compute_final_ctr_el0
> +	movz	\reg, #0
> +	movk	\reg, #0, lsl #16
> +	movk	\reg, #0, lsl #32
> +	movk	\reg, #0, lsl #48
> +alternative_cb_end
> +#endif
>  	.endm

So, FWIW, if we wanted to make _this_ macro BUG in non-protected mode
(and drop patch 01), I think we could do something like:

alternative_cb kvm_compute_final_ctr_el0
	movz	\reg, #0
	ASM_BUG()
	nop
	nop
alternative_cb_end

and then make kvm_compute_final_ctr_el0() check that we're in protected
mode before patching. That would be marginally better as that would
cover _all_ users of read_ctr and not just __flush_dcache_area, but that
first movz is a bit yuck (but necessary to keep generate_mov_q() happy I
think?), so I'll leave the decision to you.
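
Something like the below, maybe (a rough sketch, assuming the existing
is_protected_kvm_enabled() helper, and completely untested):

void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	/* Outside protected mode, leave the ASM_BUG()/nops in place */
	if (!is_protected_kvm_enabled())
		return;

	/* Patch the movz/movk slots with the system-wide CTR_EL0 value */
	generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
		       origptr, updptr, nr_inst);
}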

No objection from me for the current implementation, and if you decide to
go with it:

Reviewed-by: Quentin Perret <qperret@google.com>

Thanks,
Quentin
Marc Zyngier March 22, 2021, 6:37 p.m. UTC | #2
On Mon, 22 Mar 2021 17:40:40 +0000,
Quentin Perret <qperret@google.com> wrote:
> 
> Hey Marc,
> 
> On Monday 22 Mar 2021 at 16:48:27 (+0000), Marc Zyngier wrote:
> > In protected mode, late CPUs are not allowed to boot (enforced by
> > the PSCI relay). We can thus specialise the read_ctr macro to
> > always return a pre-computed, sanitised value.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/include/asm/assembler.h | 9 +++++++++
> >  arch/arm64/kernel/image-vars.h     | 1 +
> >  arch/arm64/kvm/va_layout.c         | 7 +++++++
> >  3 files changed, 17 insertions(+)
> > 
> > diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> > index fb651c1f26e9..1a4cee7eb3c9 100644
> > --- a/arch/arm64/include/asm/assembler.h
> > +++ b/arch/arm64/include/asm/assembler.h
> > @@ -270,12 +270,21 @@ alternative_endif
> >   * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
> >   */
> >  	.macro	read_ctr, reg
> > +#ifndef __KVM_NVHE_HYPERVISOR__
> >  alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
> >  	mrs	\reg, ctr_el0			// read CTR
> >  	nop
> >  alternative_else
> >  	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
> >  alternative_endif
> > +#else
> > +alternative_cb kvm_compute_final_ctr_el0
> > +	movz	\reg, #0
> > +	movk	\reg, #0, lsl #16
> > +	movk	\reg, #0, lsl #32
> > +	movk	\reg, #0, lsl #48
> > +alternative_cb_end
> > +#endif
> >  	.endm
> 
> So, FWIW, if we wanted to make _this_ macro BUG in non-protected mode
> (and drop patch 01), I think we could do something like:
> 
> alternative_cb kvm_compute_final_ctr_el0
> 	movz	\reg, #0
> 	ASM_BUG()
> 	nop
> 	nop
> alternative_cb_end
>
> and then make kvm_compute_final_ctr_el0() check that we're in protected
> mode before patching. That would be marginally better as that would
> cover _all_ users of read_ctr and not just __flush_dcache_area, but that
> first movz is a bit yuck (but necessary to keep generate_mov_q() happy I
> think?), so I'll leave the decision to you.

Can't say I'm keen on the yucky bit, but here's an alternative (ha!)
for you:

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1a4cee7eb3c9..7582c3bd2f05 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -278,6 +278,9 @@ alternative_else
 	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
 alternative_endif
 #else
+alternative_if_not ARM64_KVM_PROTECTED_MODE
+	ASM_BUG()
+alternative_else_nop_endif
 alternative_cb kvm_compute_final_ctr_el0
 	movz	\reg, #0
 	movk	\reg, #0, lsl #16

Yes, it is one more instruction, but it is cleaner and allows us to
drop the first patch of the series.

What do you think?

	M.
Quentin Perret March 23, 2021, 9:47 a.m. UTC | #3
Hi Marc,

On Monday 22 Mar 2021 at 18:37:14 (+0000), Marc Zyngier wrote:
> Can't say I'm keen on the yucky bit, but here's an alternative (ha!)
> for you:
> 
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index 1a4cee7eb3c9..7582c3bd2f05 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -278,6 +278,9 @@ alternative_else
>  	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
>  alternative_endif
>  #else
> +alternative_if_not ARM64_KVM_PROTECTED_MODE
> +	ASM_BUG()
> +alternative_else_nop_endif
>  alternative_cb kvm_compute_final_ctr_el0
>  	movz	\reg, #0
>  	movk	\reg, #0, lsl #16
> 
> Yes, it is one more instruction, but it is cleaner and allows us to
> drop the first patch of the series.
> 
> What do you think?

Yes, I think having the ASM_BUG() in this macro is a bit nicer and I doubt
the additional nop will make any difference, so this is looking good to
me!

Thanks,
Quentin

Patch

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fb651c1f26e9..1a4cee7eb3c9 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -270,12 +270,21 @@ alternative_endif
  * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
  */
 	.macro	read_ctr, reg
+#ifndef __KVM_NVHE_HYPERVISOR__
 alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
 	mrs	\reg, ctr_el0			// read CTR
 	nop
 alternative_else
 	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
 alternative_endif
+#else
+alternative_cb kvm_compute_final_ctr_el0
+	movz	\reg, #0
+	movk	\reg, #0, lsl #16
+	movk	\reg, #0, lsl #32
+	movk	\reg, #0, lsl #48
+alternative_cb_end
+#endif
 	.endm
 
 
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index d5dc2b792651..fdd60cd1d7e8 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -65,6 +65,7 @@ __efistub__ctype		= _ctype;
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
+KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
 
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 978301392d67..acdb7b3cc97d 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -288,3 +288,10 @@ void kvm_get_kimage_voffset(struct alt_instr *alt,
 {
 	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
 }
+
+void kvm_compute_final_ctr_el0(struct alt_instr *alt,
+			       __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
+		       origptr, updptr, nr_inst);
+}