
[1/5] KVM: arm64: Correctly honor the presence of FEAT_TCRX

Message ID 20240625130042.259175-2-maz@kernel.org (mailing list archive)
State New
Series KVM: arm64: Fix handling of TCR2_EL1

Commit Message

Marc Zyngier June 25, 2024, 1 p.m. UTC
We currently blindly enable TCR2_EL1 use in a guest, irrespective
of the feature set. This is obviously wrong, and we should actually
honor the guest configuration and handle the possible trap resulting
from the guest being buggy.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_arm.h | 2 +-
 arch/arm64/kvm/sys_regs.c        | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

Comments

Joey Gouly June 25, 2024, 2:37 p.m. UTC | #1
On Tue, Jun 25, 2024 at 02:00:37PM +0100, Marc Zyngier wrote:
> We currently blindly enable TCR2_EL1 use in a guest, irrespective
> of the feature set. This is obviously wrong, and we should actually
> honor the guest configuration and handle the possible trap resulting
> from the guest being buggy.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_arm.h | 2 +-
>  arch/arm64/kvm/sys_regs.c        | 9 +++++++++
>  2 files changed, 10 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> index b2adc2c6c82a5..e6682a3ace5af 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -102,7 +102,7 @@
>  #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
>  #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
>  
> -#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
> +#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME)
>  #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
>  
>  /* TCR_EL2 Registers bits */
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 22b45a15d0688..71996d36f3751 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -383,6 +383,12 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
>  	bool was_enabled = vcpu_has_cache_enabled(vcpu);
>  	u64 val, mask, shift;
>  
> +	if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
> +	    !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
> +		kvm_inject_undefined(vcpu);
> +		return false;
> +	}
> +

If we need to start doing this with more vm(sa) registers, it might make sense
to think of a way to do this without putting a big if/else in here.  For now
this seems fine.
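
One illustrative shape for that (a sketch only; the vm_reg_gate table and
helpers below are invented names, not existing KVM code) would be to key the
feature check off the register encoding:

	struct vm_reg_gate {
		u32	encoding;	/* sysreg encoding, e.g. SYS_TCR2_EL1 */
		bool	(*implemented)(struct kvm *kvm);
	};

	static bool vm_reg_has_tcrx(struct kvm *kvm)
	{
		return kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP);
	}

	static const struct vm_reg_gate vm_reg_gates[] = {
		{ SYS_TCR2_EL1, vm_reg_has_tcrx },
	};

	/* Returns false if the register is gated by a missing feature */
	static bool vm_reg_implemented(struct kvm *kvm, u32 encoding)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(vm_reg_gates); i++) {
			if (vm_reg_gates[i].encoding == encoding)
				return vm_reg_gates[i].implemented(kvm);
		}

		return true;	/* not feature-gated */
	}

access_vm_reg() could then inject an UNDEF whenever vm_reg_implemented()
returns false, instead of growing one open-coded check per register.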

>  	BUG_ON(!p->is_write);
>  
>  	get_access_mask(r, &mask, &shift);
> @@ -4060,6 +4066,9 @@ void kvm_init_sysreg(struct kvm_vcpu *vcpu)
>  
>  		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
>  			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
> +
> +		if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
> +			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
>  	}
>  
>  	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Marc Zyngier June 25, 2024, 6:22 p.m. UTC | #2
On Tue, 25 Jun 2024 15:37:34 +0100,
Joey Gouly <joey.gouly@arm.com> wrote:
> 
> On Tue, Jun 25, 2024 at 02:00:37PM +0100, Marc Zyngier wrote:
> > We currently blindly enable TCR2_EL1 use in a guest, irrespective
> > of the feature set. This is obviously wrong, and we should actually
> > honor the guest configuration and handle the possible trap resulting
> > from the guest being buggy.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/include/asm/kvm_arm.h | 2 +-
> >  arch/arm64/kvm/sys_regs.c        | 9 +++++++++
> >  2 files changed, 10 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> > index b2adc2c6c82a5..e6682a3ace5af 100644
> > --- a/arch/arm64/include/asm/kvm_arm.h
> > +++ b/arch/arm64/include/asm/kvm_arm.h
> > @@ -102,7 +102,7 @@
> >  #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
> >  #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
> >  
> > -#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
> > +#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME)
> >  #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
> >  
> >  /* TCR_EL2 Registers bits */
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index 22b45a15d0688..71996d36f3751 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -383,6 +383,12 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
> >  	bool was_enabled = vcpu_has_cache_enabled(vcpu);
> >  	u64 val, mask, shift;
> >  
> > +	if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
> > +	    !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
> > +		kvm_inject_undefined(vcpu);
> > +		return false;
> > +	}
> > +
> 
> If we need to start doing this with more vm(sa) registers, it might make sense
> to think of a way to do this without putting a big if/else in here.  For now
> this seems fine.

One possible solution would be to mimic the FGU behaviour and have a
shadow version of HCRX_EL2 that only indicates to the trap routing code
that something trapped through that bit needs to UNDEF.

And yes, I'd expect we'll see a whole lot of new VMSA registers going
the same way.
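
As a rough sketch of that idea (hcrx_undef is an invented field mirroring
the FGU mask, not existing code):

	/*
	 * At vcpu init: record the HCRX_EL2 bits whose traps must UNDEF
	 * because the corresponding feature isn't exposed to the guest.
	 */
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
		kvm->arch.hcrx_undef |= HCRX_EL2_TCR2En;

	/*
	 * In the trap path: anything that arrived here through a bit set
	 * in the shadow mask gets an UNDEF instead of being emulated.
	 */
	if (vcpu->kvm->arch.hcrx_undef & HCRX_EL2_TCR2En) {
		kvm_inject_undefined(vcpu);
		return false;
	}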

> 
> >  	BUG_ON(!p->is_write);
> >  
> >  	get_access_mask(r, &mask, &shift);
> > @@ -4060,6 +4066,9 @@ void kvm_init_sysreg(struct kvm_vcpu *vcpu)
> >  
> >  		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
> >  			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
> > +
> > +		if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
> > +			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
> >  	}
> >  
> >  	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
> 
> Reviewed-by: Joey Gouly <joey.gouly@arm.com>

Thanks!

	M.
Oliver Upton June 26, 2024, 11:55 p.m. UTC | #3
On Tue, Jun 25, 2024 at 07:22:53PM +0100, Marc Zyngier wrote:
> On Tue, 25 Jun 2024 15:37:34 +0100,
> Joey Gouly <joey.gouly@arm.com> wrote:
> > 
> > On Tue, Jun 25, 2024 at 02:00:37PM +0100, Marc Zyngier wrote:
> > > We currently blindly enable TCR2_EL1 use in a guest, irrespective
> > > of the feature set. This is obviously wrong, and we should actually
> > > honor the guest configuration and handle the possible trap resulting
> > > from the guest being buggy.
> > > 
> > > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > > ---
> > >  arch/arm64/include/asm/kvm_arm.h | 2 +-
> > >  arch/arm64/kvm/sys_regs.c        | 9 +++++++++
> > >  2 files changed, 10 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> > > index b2adc2c6c82a5..e6682a3ace5af 100644
> > > --- a/arch/arm64/include/asm/kvm_arm.h
> > > +++ b/arch/arm64/include/asm/kvm_arm.h
> > > @@ -102,7 +102,7 @@
> > >  #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
> > >  #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
> > >  
> > > -#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
> > > +#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME)
> > >  #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
> > >  
> > >  /* TCR_EL2 Registers bits */
> > > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > > index 22b45a15d0688..71996d36f3751 100644
> > > --- a/arch/arm64/kvm/sys_regs.c
> > > +++ b/arch/arm64/kvm/sys_regs.c
> > > @@ -383,6 +383,12 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
> > >  	bool was_enabled = vcpu_has_cache_enabled(vcpu);
> > >  	u64 val, mask, shift;
> > >  
> > > +	if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
> > > +	    !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
> > > +		kvm_inject_undefined(vcpu);
> > > +		return false;
> > > +	}
> > > +
> > 
> > If we need to start doing this with more vm(sa) registers, it might make sense
> > to think of a way to do this without putting a big if/else in here.  For now
> > this seems fine.
> 
> One possible solution would be to mimic the FGU behaviour and have a
> shadow version of HCRX_EL2 that only indicates to the trap routing code
> that something trapped through that bit needs to UNDEF.

Seems reasonable, but that'll be the problem for the _next_ person to
add an affected register ;-)

Patch

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index b2adc2c6c82a5..e6682a3ace5af 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -102,7 +102,7 @@
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
-#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
+#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME)
 #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
 
 /* TCR_EL2 Registers bits */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 22b45a15d0688..71996d36f3751 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -383,6 +383,12 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 	u64 val, mask, shift;
 
+	if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
+	    !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
 	BUG_ON(!p->is_write);
 
 	get_access_mask(r, &mask, &shift);
@@ -4060,6 +4066,9 @@ void kvm_init_sysreg(struct kvm_vcpu *vcpu)
 
 		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
 			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+
+		if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
+			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
 	}
 
 	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))