[v4,14/21] arm64: Honor VHE being disabled from the command-line

Message ID 20210118094533.2874082-15-maz@kernel.org
State New, archived
Series arm64: Early CPU feature override, and applications to VHE, BTI and PAuth

Commit Message

Marc Zyngier Jan. 18, 2021, 9:45 a.m. UTC
Finally we can check whether VHE is disabled on the command line,
and not enable it if that's the user's wish.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kernel/hyp-stub.S | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

Comments

David Brazdil Jan. 18, 2021, 1:14 p.m. UTC | #1
On Mon, Jan 18, 2021 at 09:45:26AM +0000, Marc Zyngier wrote:
> Finally we can check whether VHE is disabled on the command line,
> and not enable it if that's the user's wish.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: David Brazdil <dbrazdil@google.com>

> ---
>  arch/arm64/kernel/hyp-stub.S | 17 ++++++++++++++---
>  1 file changed, 14 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
> index 59820f9b8522..bbab2148a2a2 100644
> --- a/arch/arm64/kernel/hyp-stub.S
> +++ b/arch/arm64/kernel/hyp-stub.S
> @@ -77,13 +77,24 @@ SYM_CODE_END(el1_sync)
>  SYM_CODE_START_LOCAL(mutate_to_vhe)
>  	// Sanity check: MMU *must* be off
>  	mrs	x0, sctlr_el2
> -	tbnz	x0, #0, 1f
> +	tbnz	x0, #0, 2f
>  
>  	// Needs to be VHE capable, obviously
>  	mrs	x0, id_aa64mmfr1_el1
>  	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> -	cbz	x0, 1f
> +	cbz	x0, 2f
>  
> +	// Check whether VHE is disabled from the command line
> +	adr_l	x1, id_aa64mmfr1_val
> +	ldr	x0, [x1]
> +	adr_l	x1, id_aa64mmfr1_mask
> +	ldr	x1, [x1]
super nit: There's a ldr_l macro
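
For reference, a minimal sketch of how those two loads might look with the
ldr_l macro (from arch/arm64/include/asm/assembler.h); untested, same
registers and symbols assumed:

	// Load the override value and mask via ldr_l instead of adr_l + ldr
	ldr_l	x0, id_aa64mmfr1_val	// overridden register value
	ldr_l	x1, id_aa64mmfr1_mask	// mask of valid override fields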

> +	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> +	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
> +	cbz	x1, 1f
> +	and	x0, x0, x1
> +	cbz	x0, 2f
> +1:
>  	// Engage the VHE magic!
>  	mov_q	x0, HCR_HOST_VHE_FLAGS
>  	msr	hcr_el2, x0
> @@ -152,7 +163,7 @@ skip_spe:
>  	orr	x0, x0, x1
>  	msr	spsr_el1, x0
>  
> -1:	eret
> +2:	eret
>  SYM_CODE_END(mutate_to_vhe)
>  
>  .macro invalid_vector	label
> -- 
> 2.29.2
>
Catalin Marinas Jan. 23, 2021, 2:07 p.m. UTC | #2
On Mon, Jan 18, 2021 at 09:45:26AM +0000, Marc Zyngier wrote:
> diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
> index 59820f9b8522..bbab2148a2a2 100644
> --- a/arch/arm64/kernel/hyp-stub.S
> +++ b/arch/arm64/kernel/hyp-stub.S
> @@ -77,13 +77,24 @@ SYM_CODE_END(el1_sync)
>  SYM_CODE_START_LOCAL(mutate_to_vhe)
>  	// Sanity check: MMU *must* be off
>  	mrs	x0, sctlr_el2
> -	tbnz	x0, #0, 1f
> +	tbnz	x0, #0, 2f
>  
>  	// Needs to be VHE capable, obviously
>  	mrs	x0, id_aa64mmfr1_el1
>  	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> -	cbz	x0, 1f
> +	cbz	x0, 2f
>  
> +	// Check whether VHE is disabled from the command line
> +	adr_l	x1, id_aa64mmfr1_val
> +	ldr	x0, [x1]
> +	adr_l	x1, id_aa64mmfr1_mask
> +	ldr	x1, [x1]
> +	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> +	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
> +	cbz	x1, 1f
> +	and	x0, x0, x1
> +	cbz	x0, 2f
> +1:

I can see the advantage here in separate id_aa64mmfr1_val/mask but we
could use some asm offsets here and keep the pointer indirection simpler
in C code. You'd just need something like 'adr_l mmfr1_ovrd + VAL_OFFSET'.

Anyway, if you have a strong preference for the current approach, leave
it as is.
Marc Zyngier Jan. 24, 2021, 3:59 p.m. UTC | #3
On Sat, 23 Jan 2021 14:07:53 +0000,
Catalin Marinas <catalin.marinas@arm.com> wrote:
> 
> On Mon, Jan 18, 2021 at 09:45:26AM +0000, Marc Zyngier wrote:
> > diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
> > index 59820f9b8522..bbab2148a2a2 100644
> > --- a/arch/arm64/kernel/hyp-stub.S
> > +++ b/arch/arm64/kernel/hyp-stub.S
> > @@ -77,13 +77,24 @@ SYM_CODE_END(el1_sync)
> >  SYM_CODE_START_LOCAL(mutate_to_vhe)
> >  	// Sanity check: MMU *must* be off
> >  	mrs	x0, sctlr_el2
> > -	tbnz	x0, #0, 1f
> > +	tbnz	x0, #0, 2f
> >  
> >  	// Needs to be VHE capable, obviously
> >  	mrs	x0, id_aa64mmfr1_el1
> >  	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> > -	cbz	x0, 1f
> > +	cbz	x0, 2f
> >  
> > +	// Check whether VHE is disabled from the command line
> > +	adr_l	x1, id_aa64mmfr1_val
> > +	ldr	x0, [x1]
> > +	adr_l	x1, id_aa64mmfr1_mask
> > +	ldr	x1, [x1]
> > +	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> > +	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
> > +	cbz	x1, 1f
> > +	and	x0, x0, x1
> > +	cbz	x0, 2f
> > +1:
> 
> I can see the advantage here in separate id_aa64mmfr1_val/mask but we
> could use some asm offsets here and keep the pointer indirection simpler
> in C code. You'd just need something like 'adr_l mmfr1_ovrd + VAL_OFFSET'.
> 
> Anyway, if you have a strong preference for the current approach, leave
> it as is.

I've now moved over to a structure containing both val/mask, meaning
that we only need to keep a single pointer around in the various
feature descriptors. It certainly looks better.

Thanks,

	M.
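
As an illustration of the structure-based layout described above (a sketch
only; the symbol name and the assumption that 'val' sits at offset 0 and
'mask' at offset 8 are not taken from this series), the assembly side could
then dereference a single pointer:

	// Hypothetical combined override: struct { u64 val; u64 mask; }
	adr_l	x1, id_aa64mmfr1_override	// illustrative symbol name
	ldr	x0, [x1]			// override value
	ldr	x1, [x1, #8]			// override mask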

Patch

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 59820f9b8522..bbab2148a2a2 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -77,13 +77,24 @@  SYM_CODE_END(el1_sync)
 SYM_CODE_START_LOCAL(mutate_to_vhe)
 	// Sanity check: MMU *must* be off
 	mrs	x0, sctlr_el2
-	tbnz	x0, #0, 1f
+	tbnz	x0, #0, 2f
 
 	// Needs to be VHE capable, obviously
 	mrs	x0, id_aa64mmfr1_el1
 	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
-	cbz	x0, 1f
+	cbz	x0, 2f
 
+	// Check whether VHE is disabled from the command line
+	adr_l	x1, id_aa64mmfr1_val
+	ldr	x0, [x1]
+	adr_l	x1, id_aa64mmfr1_mask
+	ldr	x1, [x1]
+	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
+	cbz	x1, 1f
+	and	x0, x0, x1
+	cbz	x0, 2f
+1:
 	// Engage the VHE magic!
 	mov_q	x0, HCR_HOST_VHE_FLAGS
 	msr	hcr_el2, x0
@@ -152,7 +163,7 @@  skip_spe:
 	orr	x0, x0, x1
 	msr	spsr_el1, x0
 
-1:	eret
+2:	eret
 SYM_CODE_END(mutate_to_vhe)
 
 .macro invalid_vector	label
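
As a reading aid, here is the core of the new check again, with comments
spelling out each step (a sketch; the instructions are the ones from the
hunk above):

	// Extract the 4-bit VHE field from the override value and mask
	ubfx	x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4	// overridden value
	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4	// valid-field mask
	cbz	x1, 1f		// no override for this field: enable VHE
	and	x0, x0, x1	// an override is present: take its value
	cbz	x0, 2f		// VHE disabled on the command line: bail out
1:	// fall through: engage VHE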