[2/2] arm64: Cope with CPUs stuck in VHE mode

Message ID: 20210325124721.941182-3-maz@kernel.org
State: New, archived
Series: arm64: Dealing with VHE-only CPUs

Commit Message

Marc Zyngier March 25, 2021, 12:47 p.m. UTC
It seems that the CPUs part of the SoC known as Apple M1 have the
terrible habit of being stuck with HCR_EL2.E2H==1, in violation
of the architecture.

Try and work around this deplorable state of affairs by detecting
the stuck bit early and short-circuit the nVHE dance. Additional
filtering code ensures that attempts at switching to nVHE from
the command-line are also ignored.

It is still unknown whether there are many more such nuggets
to be found...

Reported-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kernel/head.S           | 33 +++++++++++++++++++++++++++---
 arch/arm64/kernel/hyp-stub.S       | 15 ++++++++++----
 arch/arm64/kernel/idreg-override.c | 13 +++++++++++-
 3 files changed, 53 insertions(+), 8 deletions(-)
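
For readers less fluent in arm64 assembly, the detection at the heart of the head.S change boils down to the following C sketch (illustrative only; the kernel does this in assembly in init_el2, before the MMU is enabled):

	/*
	 * Sketch of the E2H check: if HCR_EL2.E2H still reads back as 1 at
	 * this point, the bit is effectively RES1 and the CPU cannot be
	 * dropped into nVHE mode.
	 */
	#include <asm/kvm_arm.h>	/* HCR_E2H */
	#include <asm/sysreg.h>		/* read_sysreg() */

	static bool hcr_e2h_is_stuck(void)
	{
		return read_sysreg(hcr_el2) & HCR_E2H;
	}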

Comments

Will Deacon March 25, 2021, 7:33 p.m. UTC | #1
On Thu, Mar 25, 2021 at 12:47:21PM +0000, Marc Zyngier wrote:
> It seems that the CPUs part of the SoC known as Apple M1 have the
> terrible habit of being stuck with HCR_EL2.E2H==1, in violation
> of the architecture.
> 
> Try and work around this deplorable state of affairs by detecting
> the stuck bit early and short-circuit the nVHE dance. Additional
> filtering code ensures that attempts at switching to nVHE from
> the command-line are also ignored.
> 
> It is still unknown whether there are many more such nuggets
> to be found...
> 
> Reported-by: Hector Martin <marcan@marcan.st>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kernel/head.S           | 33 +++++++++++++++++++++++++++---
>  arch/arm64/kernel/hyp-stub.S       | 15 ++++++++++----
>  arch/arm64/kernel/idreg-override.c | 13 +++++++++++-
>  3 files changed, 53 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 840bda1869e9..db2de5b8f3d9 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -477,14 +477,13 @@ EXPORT_SYMBOL(kimage_vaddr)
>   * booted in EL1 or EL2 respectively.
>   */
>  SYM_FUNC_START(init_kernel_el)
> -	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
> -	msr	sctlr_el1, x0
> -
>  	mrs	x0, CurrentEL
>  	cmp	x0, #CurrentEL_EL2
>  	b.eq	init_el2
>  
>  SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
> +	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
> +	msr	sctlr_el1, x0
>  	isb
>  	mov_q	x0, INIT_PSTATE_EL1
>  	msr	spsr_el1, x0
> @@ -504,6 +503,34 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
>  	msr	vbar_el2, x0
>  	isb
>  
> +	/*
> +	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
> +	 * making it impossible to start in nVHE mode. Is that
> +	 * compliant with the architecture? Absolutely not!
> +	 */
> +	mrs	x0, hcr_el2
> +	and	x0, x0, #HCR_E2H
> +	cbz	x0, 1f
> +
> +	/* Switching to VHE requires a sane SCTLR_EL1 as a start */
> +	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
> +	msr_s	SYS_SCTLR_EL12, x0
> +
> +	/*
> +	 * Force an eret into a helper "function", and let it return
> +	 * to our original caller... This makes sure that we have
> +	 * initialised the basic PSTATE state.
> +	 */
> +	mov	x0, #INIT_PSTATE_EL2
> +	msr	spsr_el1, x0
> +	adr_l	x0, stick_to_vhe
> +	msr	elr_el1, x0
> +	eret

What does this do if CONFIG_VHE=n on one of these CPUs?

Will
Marc Zyngier March 26, 2021, 11:20 a.m. UTC | #2
On Thu, 25 Mar 2021 19:33:19 +0000,
Will Deacon <will@kernel.org> wrote:
> 
> On Thu, Mar 25, 2021 at 12:47:21PM +0000, Marc Zyngier wrote:
> > [...]
> > @@ -504,6 +503,34 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
> >  	msr	vbar_el2, x0
> >  	isb
> >  
> > +	/*
> > +	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
> > +	 * making it impossible to start in nVHE mode. Is that
> > +	 * compliant with the architecture? Absolutely not!
> > +	 */
> > +	mrs	x0, hcr_el2
> > +	and	x0, x0, #HCR_E2H
> > +	cbz	x0, 1f
> > +
> > +	/* Switching to VHE requires a sane SCTLR_EL1 as a start */
> > +	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
> > +	msr_s	SYS_SCTLR_EL12, x0
> > +
> > +	/*
> > +	 * Force an eret into a helper "function", and let it return
> > +	 * to our original caller... This makes sure that we have
> > +	 * initialised the basic PSTATE state.
> > +	 */
> > +	mov	x0, #INIT_PSTATE_EL2
> > +	msr	spsr_el1, x0
> > +	adr_l	x0, stick_to_vhe
> > +	msr	elr_el1, x0
> > +	eret
> 
> What does this do if CONFIG_VHE=n on one of these CPUs?

Interesting question. With this patch, it will actually boot, and
behave just fine as long as you don't run a guest (the percpu offset
being stored in TPIDR_EL1 will then be corrupted, though you may not
even get there because of the sysreg renaming being unexpectedly
active).

I guess I could either make this code conditional on CONFIG_ARM64_VHE
and let the machine crash early without a word, or have some later
checks once the machine has started booting. In the latter case, displaying
anything useful is going to be a challenge though (the odds of someone
having a serial console on this box are close to nil). Pick your poison.

	M.
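
A minimal form of the "later check" floated above could look like the sketch below (an assumption on my part, reusing the v5.12-era helpers already visible in this thread; Marc's actual approach follows in the next message):

	/*
	 * Sketch only: a kernel built without CONFIG_ARM64_VHE has no
	 * business finding itself running at EL2 on a VHE-stuck CPU.
	 */
	#include <linux/kconfig.h>	/* IS_ENABLED() */
	#include <asm/ptrace.h>		/* CurrentEL_EL2 */
	#include <asm/sysreg.h>		/* read_sysreg() */

	static inline bool stuck_at_el2_without_vhe(void)
	{
		return !IS_ENABLED(CONFIG_ARM64_VHE) &&
		       read_sysreg(CurrentEL) == CurrentEL_EL2;
	}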
Will Deacon March 29, 2021, 10:22 a.m. UTC | #3
On Fri, Mar 26, 2021 at 11:20:18AM +0000, Marc Zyngier wrote:
> On Thu, 25 Mar 2021 19:33:19 +0000,
> Will Deacon <will@kernel.org> wrote:
> > 
> > On Thu, Mar 25, 2021 at 12:47:21PM +0000, Marc Zyngier wrote:
> > > It seems that the CPUs part of the SoC known as Apple M1 have the
> > > terrible habit of being stuck with HCR_EL2.E2H==1, in violation
> > > of the architecture.
> > > 
> > > Try and work around this deplorable state of affairs by detecting
> > > the stuck bit early and short-circuit the nVHE dance. Additional
> > > filtering code ensures that attempts at switching to nVHE from
> > > the command-line are also ignored.
> > > 
> > > It is still unknown whether there are many more such nuggets
> > > to be found...
> > > 
> > > Reported-by: Hector Martin <marcan@marcan.st>
> > > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > > ---
> > >  arch/arm64/kernel/head.S           | 33 +++++++++++++++++++++++++++---
> > >  arch/arm64/kernel/hyp-stub.S       | 15 ++++++++++----
> > >  arch/arm64/kernel/idreg-override.c | 13 +++++++++++-
> > >  3 files changed, 53 insertions(+), 8 deletions(-)
> > > 
> > > diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> > > index 840bda1869e9..db2de5b8f3d9 100644
> > > --- a/arch/arm64/kernel/head.S
> > > +++ b/arch/arm64/kernel/head.S
> > > @@ -477,14 +477,13 @@ EXPORT_SYMBOL(kimage_vaddr)
> > >   * booted in EL1 or EL2 respectively.
> > >   */
> > >  SYM_FUNC_START(init_kernel_el)
> > > -	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
> > > -	msr	sctlr_el1, x0
> > > -
> > >  	mrs	x0, CurrentEL
> > >  	cmp	x0, #CurrentEL_EL2
> > >  	b.eq	init_el2
> > >  
> > >  SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
> > > +	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
> > > +	msr	sctlr_el1, x0
> > >  	isb
> > >  	mov_q	x0, INIT_PSTATE_EL1
> > >  	msr	spsr_el1, x0
> > > @@ -504,6 +503,34 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
> > >  	msr	vbar_el2, x0
> > >  	isb
> > >  
> > > +	/*
> > > +	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
> > > +	 * making it impossible to start in nVHE mode. Is that
> > > +	 * compliant with the architecture? Absolutely not!
> > > +	 */
> > > +	mrs	x0, hcr_el2
> > > +	and	x0, x0, #HCR_E2H
> > > +	cbz	x0, 1f
> > > +
> > > +	/* Switching to VHE requires a sane SCTLR_EL1 as a start */
> > > +	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
> > > +	msr_s	SYS_SCTLR_EL12, x0
> > > +
> > > +	/*
> > > +	 * Force an eret into a helper "function", and let it return
> > > +	 * to our original caller... This makes sure that we have
> > > +	 * initialised the basic PSTATE state.
> > > +	 */
> > > +	mov	x0, #INIT_PSTATE_EL2
> > > +	msr	spsr_el1, x0
> > > +	adr_l	x0, stick_to_vhe
> > > +	msr	elr_el1, x0
> > > +	eret
> > 
> > What does this do if CONFIG_VHE=n on one of these CPUs?
> 
> Interesting question. With this patch, it will actually boot, and
> behave just fine as long as you don't run a guest (the percpu offset
> being stored in TPIDR_EL1 will then be corrupted, though you may not
> even get there because of the sysreg renaming being unexpectedly
> active).
> 
> I guess I could either make this code conditional on CONFIG_ARM64_VHE
> and let the machine crash early without a word, or have some later
> > checks once the machine has started booting. In the latter case, displaying
> anything useful is going to be a challenge though (the odds of someone
> having a serial console on this box are close to nil). Pick your poison.

I think the best thing to do would be to fail to initialise KVM if the
kernel is stuck at EL2 but we don't have VHE support compiled in. Is that
do-able?

Will
Marc Zyngier March 30, 2021, 5 p.m. UTC | #4
On Mon, 29 Mar 2021 11:22:00 +0100,
Will Deacon <will@kernel.org> wrote:
> 
> On Fri, Mar 26, 2021 at 11:20:18AM +0000, Marc Zyngier wrote:
> > I guess I could either make this code conditional on CONFIG_ARM64_VHE
> > and let the machine crash early without a word, or have some later
> > checks once the machine has started booting. In the latter case, displaying
> > anything useful is going to be a challenge though (the odds of someone
> > having a serial console on this box are close to nil). Pick your poison.
> 
> I think the best thing to do would be to fail to initialise KVM if the
> kernel is stuck at EL2 but we don't have VHE support compiled in. Is that
> do-able?

To quote someone, it is "a little ugly on the side".

I came up with the following hack. Can't say I'm in love with it,
especially the sprinkling of checks in the alternative callbacks, but
hey, I can boot the machine without CONFIG_ARM64_VHE, and get the
expected splat at boot time:

[    0.033604] ------------[ cut here ]------------
[    0.033850] CPU: CPUs started in inconsistent modes
[    0.033854] WARNING: CPU: 0 PID: 1 at arch/arm64/kernel/smp.c:434 hyp_mode_check+0x90/0xc4
[    0.034863] Modules linked in:
[    0.035100] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.12.0-rc3-00103-geee3f110c447-dirty #3231
[    0.035776] Hardware name: Apple Mac mini (M1, 2020) (DT)
[    0.036192] pstate: 60400009 (nZCv daif +PAN -UAO -TCO BTYPE=--)
[    0.036654] pc : hyp_mode_check+0x90/0xc4
[    0.036963] lr : hyp_mode_check+0x90/0xc4
[    0.037271] sp : ffff800010053e30
[    0.037526] x29: ffff800010053e30 x28: 0000000000000000 
[    0.037935] x27: 0000000000000000 x26: 0000000000000000 
[    0.038344] x25: 0000000000000000 x24: 0000000000000000 
[    0.038754] x23: 0000000000000000 x22: 0000000000000000 
[    0.039163] x21: 0000000000000000 x20: ffffca3b2f53fc04 
[    0.039572] x19: ffffca3b2fac1000 x18: 0000000000000001 
[    0.039981] x17: 00000000cc4379d6 x16: 000000005c7b6156 
[    0.040391] x15: 0000000000000030 x14: ffffffffffffffff 
[    0.040800] x13: ffff800090053ab7 x12: ffff800010053ac0 
[    0.041209] x11: 0000000bbe2c6238 x10: ffffca3b2faa0ad8 
[    0.041618] x9 : ffffca3b2e310df0 x8 : fffffffffffe18b8 
[    0.042027] x7 : ffffca3b2fa481d8 x6 : 0000000000002ffd 
[    0.042437] x5 : 0000000000000000 x4 : 0000000000000000 
[    0.042846] x3 : 00000000ffffffff x2 : 0000000000000000 
[    0.043255] x1 : 0000000000000000 x0 : ffff4af181631280 
[    0.043665] Call trace:
[    0.043852]  hyp_mode_check+0x90/0xc4
[    0.044134]  smp_cpus_done+0x34/0x48
[    0.044409]  smp_init+0x80/0x90
[    0.044651]  kernel_init_freeable+0x108/0x160
[    0.044986]  kernel_init+0x20/0x12c
[    0.045254]  ret_from_fork+0x10/0x3c
[    0.045530] ---[ end trace 0736417247c9e9a3 ]---
[...]
[    0.616800] kvm [1]: HYP mode not available

I'll wrap that up in a separate patch, and we can then discuss whether
we really want it...

Thanks,

	M.

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7379f35ae2c6..69bc4e26aa26 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -72,6 +72,11 @@ void __hyp_reset_vectors(void);
 
 DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
 
+static inline bool is_kernel_in_hyp_mode(void)
+{
+	return read_sysreg(CurrentEL) == CurrentEL_EL2;
+}
+
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
@@ -83,6 +88,10 @@ static inline bool is_hyp_mode_available(void)
 	    static_branch_likely(&kvm_protected_mode_initialized))
 		return true;
 
+	/* Catch braindead CPUs */
+	if (!IS_ENABLED(CONFIG_ARM64_VHE) && is_kernel_in_hyp_mode())
+		return false;
+
 	return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
 		__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
 }
@@ -98,12 +107,11 @@ static inline bool is_hyp_mode_mismatched(void)
 	    static_branch_likely(&kvm_protected_mode_initialized))
 		return false;
 
-	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
-}
+	/* Catch braindead CPUs */
+	if (!IS_ENABLED(CONFIG_ARM64_VHE) && is_kernel_in_hyp_mode())
+		return true;
 
-static inline bool is_kernel_in_hyp_mode(void)
-{
-	return read_sysreg(CurrentEL) == CurrentEL_EL2;
+	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
 static __always_inline bool has_vhe(void)
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 978301392d67..edb048654e00 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -156,6 +156,9 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 {
 	int i;
 
+	if (!is_hyp_mode_available())
+		return;
+
 	BUG_ON(nr_inst != 5);
 
 	for (i = 0; i < nr_inst; i++) {
@@ -191,6 +194,9 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 	u64 addr;
 	u32 insn;
 
+	if (!is_hyp_mode_available())
+		return;
+
 	BUG_ON(nr_inst != 4);
 
 	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
@@ -244,6 +250,9 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst
 {
 	u32 insn, oinsn, rd;
 
+	if (!is_hyp_mode_available())
+		return;
+
 	BUG_ON(nr_inst != 4);
 
 	/* Compute target register */
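
For context, the "kvm [1]: HYP mode not available" line in the splat above comes from the pre-existing bail-out at KVM init time, which is what the is_hyp_mode_available() tweak now trips on these CPUs; roughly (from memory of the v5.12-era arch/arm64/kvm/arm.c, so treat as approximate):

	/*
	 * Approximate sketch: kvm_arch_init() refuses to go any further
	 * when HYP mode is reported unusable, which is exactly what the
	 * change above makes a VHE-stuck CPU report on a
	 * !CONFIG_ARM64_VHE kernel.
	 */
	int kvm_arch_init(void *opaque)
	{
		if (!is_hyp_mode_available()) {
			kvm_info("HYP mode not available\n");
			return -ENODEV;
		}

		/* ... the rest of KVM's init carries on as before ... */
		return 0;
	}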

Patch

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 840bda1869e9..db2de5b8f3d9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -477,14 +477,13 @@  EXPORT_SYMBOL(kimage_vaddr)
  * booted in EL1 or EL2 respectively.
  */
 SYM_FUNC_START(init_kernel_el)
-	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
-	msr	sctlr_el1, x0
-
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
 	b.eq	init_el2
 
 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
+	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	msr	sctlr_el1, x0
 	isb
 	mov_q	x0, INIT_PSTATE_EL1
 	msr	spsr_el1, x0
@@ -504,6 +503,34 @@  SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 	msr	vbar_el2, x0
 	isb
 
+	/*
+	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
+	 * making it impossible to start in nVHE mode. Is that
+	 * compliant with the architecture? Absolutely not!
+	 */
+	mrs	x0, hcr_el2
+	and	x0, x0, #HCR_E2H
+	cbz	x0, 1f
+
+	/* Switching to VHE requires a sane SCTLR_EL1 as a start */
+	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	msr_s	SYS_SCTLR_EL12, x0
+
+	/*
+	 * Force an eret into a helper "function", and let it return
+	 * to our original caller... This makes sure that we have
+	 * initialised the basic PSTATE state.
+	 */
+	mov	x0, #INIT_PSTATE_EL2
+	msr	spsr_el1, x0
+	adr_l	x0, stick_to_vhe
+	msr	elr_el1, x0
+	eret
+
+1:
+	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	msr	sctlr_el1, x0
+
 	msr	elr_el2, lr
 	mov	w0, #BOOT_CPU_MODE_EL2
 	eret
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 5eccbd62fec8..e6fa5cdd790a 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -27,12 +27,12 @@  SYM_CODE_START(__hyp_stub_vectors)
 	ventry	el2_fiq_invalid			// FIQ EL2t
 	ventry	el2_error_invalid		// Error EL2t
 
-	ventry	el2_sync_invalid		// Synchronous EL2h
+	ventry	elx_sync			// Synchronous EL2h
 	ventry	el2_irq_invalid			// IRQ EL2h
 	ventry	el2_fiq_invalid			// FIQ EL2h
 	ventry	el2_error_invalid		// Error EL2h
 
-	ventry	el1_sync			// Synchronous 64-bit EL1
+	ventry	elx_sync			// Synchronous 64-bit EL1
 	ventry	el1_irq_invalid			// IRQ 64-bit EL1
 	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
 	ventry	el1_error_invalid		// Error 64-bit EL1
@@ -45,7 +45,7 @@  SYM_CODE_END(__hyp_stub_vectors)
 
 	.align 11
 
-SYM_CODE_START_LOCAL(el1_sync)
+SYM_CODE_START_LOCAL(elx_sync)
 	cmp	x0, #HVC_SET_VECTORS
 	b.ne	1f
 	msr	vbar_el2, x1
@@ -71,7 +71,7 @@  SYM_CODE_START_LOCAL(el1_sync)
 
 9:	mov	x0, xzr
 	eret
-SYM_CODE_END(el1_sync)
+SYM_CODE_END(elx_sync)
 
 // nVHE? No way! Give me the real thing!
 SYM_CODE_START_LOCAL(mutate_to_vhe)
@@ -243,3 +243,10 @@  SYM_FUNC_START(switch_to_vhe)
 #endif
 	ret
 SYM_FUNC_END(switch_to_vhe)
+
+SYM_FUNC_START(stick_to_vhe)
+	mov	x0, #HVC_VHE_RESTART
+	hvc	#0
+	mov	x0, #BOOT_CPU_MODE_EL2
+	ret
+SYM_FUNC_END(stick_to_vhe)
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index be92fcd319a1..6a8a14955fba 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -29,11 +29,22 @@  struct ftr_set_desc {
 	} 				fields[];
 };
 
+static bool __init mmfr1_vh_filter(u64 val)
+{
+	/*
+	 * If we ever reach this point while running VHE, we're
+	 * guaranteed to be on one of these funky, VHE-stuck CPUs. If
+	 * the user was trying to force nVHE on us, proceed with
+	 * attitude adjustment.
+	 */
+	return !(is_kernel_in_hyp_mode() && val == 0);
+}
+
 static const struct ftr_set_desc mmfr1 __initconst = {
 	.name		= "id_aa64mmfr1",
 	.override	= &id_aa64mmfr1_override,
 	.fields		= {
-	        { "vh", ID_AA64MMFR1_VHE_SHIFT },
+	        { "vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter },
 		{}
 	},
 };
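
In practice, the filter means that an attempt to force nVHE from the kernel command line on one of these machines is simply ignored rather than leaving the boot in an impossible configuration; for example (option spellings per the v5.12 override code, shown for illustration only):

	# either of these normally requests an nVHE boot...
	id_aa64mmfr1.vh=0
	kvm-arm.mode=nvhe
	# ...but mmfr1_vh_filter() rejects the vh=0 override when the
	# kernel is already running at EL2 with E2H stuck, so the boot
	# stays VHE.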