[v2,04/25] KVM: arm64: nv: Add sanitising to EL2 configuration registers

Message ID 20240130204533.693853-5-maz@kernel.org (mailing list archive)
State New, archived
Series: KVM/arm64: VM configuration enforcement

Commit Message

Marc Zyngier Jan. 30, 2024, 8:45 p.m. UTC
We can now start making use of our sanitising masks by setting them
to values that depend on the guest's configuration.

First up are VTTBR_EL2, VTCR_EL2, VMPIDR_EL2 and HCR_EL2.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/nested.c | 56 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 55 insertions(+), 1 deletion(-)
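
For context: these masks are consumed when the guest's view of a
VNCR-backed register is read back. A minimal sketch of that application
(mirroring kvm_vcpu_sanitise_vncr_reg() as seen in the patch below; the
helper name is illustrative, not the verbatim kernel code):

	static inline u64 sanitise_el2_reg(u64 v, u64 res0, u64 res1)
	{
		v &= ~res0;	/* features absent from this VM read as 0 */
		v |= res1;	/* mandatory bits read back as 1 */
		return v;
	}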

Comments

Joey Gouly Jan. 31, 2024, 5:16 p.m. UTC | #1
On Tue, Jan 30, 2024 at 08:45:11PM +0000, Marc Zyngier wrote:
> We can now start making use of our sanitising masks by setting them
> to values that depend on the guest's configuration.
> 
> First up are VTTBR_EL2, VTCR_EL2, VMPIDR_EL2 and HCR_EL2.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/nested.c | 56 ++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 55 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index c976cd4b8379..ee461e630527 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -181,7 +181,7 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
>  	return v;
>  }
>  
> -static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> +static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
>  {
>  	int i = sr - __VNCR_START__;
>  
> @@ -191,6 +191,7 @@ static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u
>  
>  int kvm_init_nv_sysregs(struct kvm *kvm)
>  {
> +	u64 res0, res1;
>  	int ret = 0;
>  
>  	mutex_lock(&kvm->arch.config_lock);
> @@ -209,6 +210,59 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
>  		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
>  						       kvm->arch.id_regs[i]);
>  
> +	/* VTTBR_EL2 */
> +	res0 = res1 = 0;
> +	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
> +		res0 |= GENMASK(63, 56);
> +	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);

CnP?

> +
> +	/* VTCR_EL2 */
> +	res0 = GENMASK(63, 32) | GENMASK(30, 20);
> +	res1 = BIT(31);
> +	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
> +
> +	/* VMPIDR_EL2 */
> +	res0 = GENMASK(63, 40) | GENMASK(30, 24);
> +	res1 = BIT(31);
> +	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
> +
> +	/* HCR_EL2 */
> +	res0 = BIT(48);
> +	res1 = HCR_RW;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
> +		res0 |= GENMASK(63, 59);
> +	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
> +		res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
> +		res0 |= (HCR_TTLBIS | HCR_TTLBOS);
> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
> +	    !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
> +		res1 = HCR_ENSCXT;

I'm confused here. If the VM has neither CSV2_2 nor CSV2_1p2, HCR_ENSCXT is res1, which means we wouldn't trap?

The Arm ARM says:

	EnSCXT, bit [53]
		When FEAT_CSV2_2 is implemented or FEAT_CSV2_1p2 is implemented:

			[..]

		Otherwise:
			RES0

And if you actually meant res1, then you need: res1 |= HCR_ENSCXT, otherwise you override the HCR_RW res1 bit!
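
In other words, assuming the bit really is RES0 whenever neither
FEAT_CSV2_2 nor FEAT_CSV2_1p2 is implemented, the check should
presumably end up as something like (sketch only):

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
	    !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
		res0 |= HCR_ENSCXT;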

> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
> +		res0 |= (HCR_TID4 | HCR_TICAB | HCR_TOCU);

minor nitpick: can you reverse these to TOCU | TICAB | TID4 so they match the relative bit positions in the register?

> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
> +		res0 |= HCR_AMVOFFEN;

We currently mask out ID_AA64PFR0_EL1.AMU (read_sanitised_id_aa64pfr0_el1 and
part of the .val in the sys_regs_desc[]), but it can't hurt to be future proof.

> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
> +		res0 |= HCR_FIEN;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
> +		res0 |= HCR_FWB;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
> +		res0 |= HCR_NV2;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
> +		res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
> +	if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
> +	      __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
> +		res0 |= (HCR_API | HCR_APK);
> +	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
> +		res0 |= BIT(39);
> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
> +		res0 |= (HCR_TERR | HCR_TEA);

Same annoying nitpick: flip the order of the values to match the bit positions.

> +	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
> +		res0 |= HCR_TLOR;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
> +		res1 |= HCR_E2H;
> +	set_sysreg_masks(kvm, HCR_EL2, res0, res1);
> +
>  out:
>  	mutex_unlock(&kvm->arch.config_lock);
>  

I need a lie down now.

Thanks,
Joey
Joey Gouly Feb. 1, 2024, 2:56 p.m. UTC | #2
On Tue, Jan 30, 2024 at 08:45:11PM +0000, Marc Zyngier wrote:
> We can now start making use of our sanitising masks by setting them
> to values that depend on the guest's configuration.
> 
> First up are VTTBR_EL2, VTCR_EL2, VMPIDR_EL2 and HCR_EL2.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/nested.c | 56 ++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 55 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index c976cd4b8379..ee461e630527 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -181,7 +181,7 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
>  	return v;
>  }
>  
> -static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> +static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
>  {
>  	int i = sr - __VNCR_START__;
>  
> @@ -191,6 +191,7 @@ static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u
>  
>  int kvm_init_nv_sysregs(struct kvm *kvm)
>  {
> +	u64 res0, res1;
>  	int ret = 0;
>  
>  	mutex_lock(&kvm->arch.config_lock);
> @@ -209,6 +210,59 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
>  		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
>  						       kvm->arch.id_regs[i]);
>  
> +	/* VTTBR_EL2 */
> +	res0 = res1 = 0;
> +	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
> +		res0 |= GENMASK(63, 56);
> +	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
> +
> +	/* VTCR_EL2 */
> +	res0 = GENMASK(63, 32) | GENMASK(30, 20);
> +	res1 = BIT(31);
> +	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
> +
> +	/* VMPIDR_EL2 */
> +	res0 = GENMASK(63, 40) | GENMASK(30, 24);
> +	res1 = BIT(31);
> +	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
> +
> +	/* HCR_EL2 */
> +	res0 = BIT(48);
> +	res1 = HCR_RW;

Just want to clarify this bit: is this restricting the (first-level
only?) nested EL1 to running as AArch64?
Should we be sanitising ID_AA64PFR0_EL1.EL1 to 0b0001?

> +	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
> +		res0 |= GENMASK(63, 59);
> +	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
> +		res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
> +		res0 |= (HCR_TTLBIS | HCR_TTLBOS);
> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
> +	    !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
> +		res1 = HCR_ENSCXT;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
> +		res0 |= (HCR_TID4 | HCR_TICAB | HCR_TOCU);
> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
> +		res0 |= HCR_AMVOFFEN;
> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
> +		res0 |= HCR_FIEN;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
> +		res0 |= HCR_FWB;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
> +		res0 |= HCR_NV2;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
> +		res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
> +	if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
> +	      __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
> +		res0 |= (HCR_API | HCR_APK);
> +	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
> +		res0 |= BIT(39);
> +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
> +		res0 |= (HCR_TERR | HCR_TEA);
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
> +		res0 |= HCR_TLOR;
> +	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
> +		res1 |= HCR_E2H;
> +	set_sysreg_masks(kvm, HCR_EL2, res0, res1);
> +
>  out:
>  	mutex_unlock(&kvm->arch.config_lock);
>  

Thanks,
Joey
Marc Zyngier Feb. 2, 2024, 3:05 p.m. UTC | #3
On Wed, 31 Jan 2024 17:16:02 +0000,
Joey Gouly <joey.gouly@arm.com> wrote:
> 
> On Tue, Jan 30, 2024 at 08:45:11PM +0000, Marc Zyngier wrote:
> > We can now start making use of our sanitising masks by setting them
> > to values that depend on the guest's configuration.
> > 
> > First up are VTTBR_EL2, VTCR_EL2, VMPIDR_EL2 and HCR_EL2.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/kvm/nested.c | 56 ++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 55 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> > index c976cd4b8379..ee461e630527 100644
> > --- a/arch/arm64/kvm/nested.c
> > +++ b/arch/arm64/kvm/nested.c
> > @@ -181,7 +181,7 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
> >  	return v;
> >  }
> >  
> > -static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> > +static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> >  {
> >  	int i = sr - __VNCR_START__;
> >  
> > @@ -191,6 +191,7 @@ static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u
> >  
> >  int kvm_init_nv_sysregs(struct kvm *kvm)
> >  {
> > +	u64 res0, res1;
> >  	int ret = 0;
> >  
> >  	mutex_lock(&kvm->arch.config_lock);
> > @@ -209,6 +210,59 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
> >  		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
> >  						       kvm->arch.id_regs[i]);
> >  
> > +	/* VTTBR_EL2 */
> > +	res0 = res1 = 0;
> > +	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
> > +		res0 |= GENMASK(63, 56);
> > +	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
> 
> CnP?

Missing indeed. I'll add it.
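
Something along these lines, presumably (sketch only, reusing the
existing VTTBR_CNP_BIT definition):

	/* CnP (bit 0) is RES0 when FEAT_TTCNP isn't advertised */
	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
		res0 |= VTTBR_CNP_BIT;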

> 
> > +
> > +	/* VTCR_EL2 */
> > +	res0 = GENMASK(63, 32) | GENMASK(30, 20);
> > +	res1 = BIT(31);
> > +	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
> > +
> > +	/* VMPIDR_EL2 */
> > +	res0 = GENMASK(63, 40) | GENMASK(30, 24);
> > +	res1 = BIT(31);
> > +	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
> > +
> > +	/* HCR_EL2 */
> > +	res0 = BIT(48);
> > +	res1 = HCR_RW;
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
> > +		res0 |= GENMASK(63, 59);
> > +	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
> > +		res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
> > +		res0 |= (HCR_TTLBIS | HCR_TTLBOS);
> > +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
> > +	    !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
> > +		res1 = HCR_ENSCXT;
> 
> I'm confused here. If the VM has neither CSV2_2 nor CSV2_1p2, HCR_ENSCXT is res1, which means we wouldn't trap?
> 
> The Arm ARM says:
> 
> 	EnSCXT, bit [53]
> 		When FEAT_CSV2_2 is implemented or FEAT_CSV2_1p2 is implemented:
> 
> 			[..]
> 
> 		Otherwise:
> 			RES0
> 
> And if you actually meant res1, then you need: res1 |= HCR_ENSCXT, otherwise you override the HCR_RW res1 bit!

You're right on all counts. This is a total thinko, coupled with a
pretty bad bug.

>
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
> > +		res0 |= (HCR_TID4 | HCR_TICAB | HCR_TOCU);
> 
> minor nitpick: can you reverse these to TOCU | TICAB | TID4 so they match the relative bit positions in the register?

Sure can.

>
> > +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
> > +		res0 |= HCR_AMVOFFEN;
> 
> We currently mask out ID_AA64PFR0_EL1.AMU (read_sanitised_id_aa64pfr0_el1 and
> part of the .val in the sys_regs_desc[]), but it can't hurt to be future
> proof.

That's my idea: we shouldn't have to worry about any of that in the
core code. Treat all features as potentially implemented, and actively
check for them being disabled.

> 
> > +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
> > +		res0 |= HCR_FIEN;
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
> > +		res0 |= HCR_FWB;
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
> > +		res0 |= HCR_NV2;
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
> > +		res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
> > +	if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
> > +	      __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
> > +		res0 |= (HCR_API | HCR_APK);
> > +	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
> > +		res0 |= BIT(39);
> > +	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
> > +		res0 |= (HCR_TERR | HCR_TEA);
> 
> Same annoying nitpick: flip the order of the values to match the bit positions.

Will do.

> 
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
> > +		res0 |= HCR_TLOR;
> > +	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
> > +		res1 |= HCR_E2H;
> > +	set_sysreg_masks(kvm, HCR_EL2, res0, res1);
> > +
> >  out:
> >  	mutex_unlock(&kvm->arch.config_lock);
> >  
> 
> I need a lie down now.

I know the feeling. This sort of crap ruins your day. Again, your
effort is much appreciated!

Thanks,

	M.
Marc Zyngier Feb. 2, 2024, 3:10 p.m. UTC | #4
On Thu, 01 Feb 2024 14:56:07 +0000,
Joey Gouly <joey.gouly@arm.com> wrote:
> 
> On Tue, Jan 30, 2024 at 08:45:11PM +0000, Marc Zyngier wrote:
> > We can now start making use of our sanitising masks by setting them
> > to values that depend on the guest's configuration.
> > 
> > First up are VTTBR_EL2, VTCR_EL2, VMPIDR_EL2 and HCR_EL2.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/kvm/nested.c | 56 ++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 55 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> > index c976cd4b8379..ee461e630527 100644
> > --- a/arch/arm64/kvm/nested.c
> > +++ b/arch/arm64/kvm/nested.c
> > @@ -181,7 +181,7 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
> >  	return v;
> >  }
> >  
> > -static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> > +static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> >  {
> >  	int i = sr - __VNCR_START__;
> >  
> > @@ -191,6 +191,7 @@ static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u
> >  
> >  int kvm_init_nv_sysregs(struct kvm *kvm)
> >  {
> > +	u64 res0, res1;
> >  	int ret = 0;
> >  
> >  	mutex_lock(&kvm->arch.config_lock);
> > @@ -209,6 +210,59 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
> >  		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
> >  						       kvm->arch.id_regs[i]);
> >  
> > +	/* VTTBR_EL2 */
> > +	res0 = res1 = 0;
> > +	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
> > +		res0 |= GENMASK(63, 56);
> > +	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
> > +
> > +	/* VTCR_EL2 */
> > +	res0 = GENMASK(63, 32) | GENMASK(30, 20);
> > +	res1 = BIT(31);
> > +	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
> > +
> > +	/* VMPIDR_EL2 */
> > +	res0 = GENMASK(63, 40) | GENMASK(30, 24);
> > +	res1 = BIT(31);
> > +	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
> > +
> > +	/* HCR_EL2 */
> > +	res0 = BIT(48);
> > +	res1 = HCR_RW;
> 
> Just want to clarify this bit: is this restricting the (first-level
> only?) nested EL1 to running as AArch64?

This is restricting it at all levels. The guest hypervisor will
eventually write its own view of HCR_EL2.RW in the VNCR page, and if
it has promised AArch32 to its own guest (at any level), it is in for
a treat, because we will forcefully reset this bit to 1.
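
Concretely: with HCR_RW in res1, the sanitised read-back is
(v & ~res0) | res1, so RW comes back as 1 regardless of what the L1
hypervisor wrote into its VNCR page.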

> Should we be sanitising ID_AA64PFR0_EL1.EL1 to 0b0001?

We already do. See limit_nv_id_reg() and how it constrains
ID_AA64PFR0_EL1.ELx to 0b0001.
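
Roughly speaking, it clears each ELx field and re-inserts 0b0001
("AArch64 only"). A sketch for EL1, not the exact code:

	val &= ~ID_AA64PFR0_EL1_EL1_MASK;
	val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);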

Thanks,

	M.
Joey Gouly Feb. 2, 2024, 4:26 p.m. UTC | #5
On Fri, Feb 02, 2024 at 03:10:26PM +0000, Marc Zyngier wrote:
> On Thu, 01 Feb 2024 14:56:07 +0000,
> Joey Gouly <joey.gouly@arm.com> wrote:
> > 
> > On Tue, Jan 30, 2024 at 08:45:11PM +0000, Marc Zyngier wrote:
> > > We can now start making use of our sanitising masks by setting them
> > > to values that depend on the guest's configuration.
> > > 
> > > First up are VTTBR_EL2, VTCR_EL2, VMPIDR_EL2 and HCR_EL2.
> > > 
> > > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > > ---
> > >  arch/arm64/kvm/nested.c | 56 ++++++++++++++++++++++++++++++++++++++++-
> > >  1 file changed, 55 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> > > index c976cd4b8379..ee461e630527 100644
> > > --- a/arch/arm64/kvm/nested.c
> > > +++ b/arch/arm64/kvm/nested.c
> > > @@ -181,7 +181,7 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
> > >  	return v;
> > >  }
> > >  
> > > -static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> > > +static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
> > >  {
> > >  	int i = sr - __VNCR_START__;
> > >  
> > > @@ -191,6 +191,7 @@ static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u
> > >  
> > >  int kvm_init_nv_sysregs(struct kvm *kvm)
> > >  {
> > > +	u64 res0, res1;
> > >  	int ret = 0;
> > >  
> > >  	mutex_lock(&kvm->arch.config_lock);
> > > @@ -209,6 +210,59 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
> > >  		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
> > >  						       kvm->arch.id_regs[i]);
> > >  
> > > +	/* VTTBR_EL2 */
> > > +	res0 = res1 = 0;
> > > +	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
> > > +		res0 |= GENMASK(63, 56);
> > > +	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
> > > +
> > > +	/* VTCR_EL2 */
> > > +	res0 = GENMASK(63, 32) | GENMASK(30, 20);
> > > +	res1 = BIT(31);
> > > +	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
> > > +
> > > +	/* VMPIDR_EL2 */
> > > +	res0 = GENMASK(63, 40) | GENMASK(30, 24);
> > > +	res1 = BIT(31);
> > > +	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
> > > +
> > > +	/* HCR_EL2 */
> > > +	res0 = BIT(48);
> > > +	res1 = HCR_RW;
> > 
> > Just want to clarify this bit: is this restricting the (first-level
> > only?) nested EL1 to running as AArch64?
> 
> This is restricting it at all levels. The guest hypervisor will
> eventually write its own view of HCR_EL2.RW in the VNCR page, and if
> it has promised AArch32 to its own guest (at any level), it is in for
> a treat, because we will forcefully reset this bit to 1.

Alright, thanks, I'll have to work through that some more.

> 
> > Should we be sanitising ID_AA64PFR0_EL1.EL1 to 0b0001?
> 
> We already do. See limit_nv_id_reg() and how it constrains
> ID_AA64PFR0_EL1.ELx to 0b0001.
> 

I forgot about that, I was looking at read_sanitised_id_aa64pfr0_el1() in the
non-NV parts.

Thanks,
Joey

Patch

diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index c976cd4b8379..ee461e630527 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -181,7 +181,7 @@  u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
 	return v;
 }
 
-static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
 {
 	int i = sr - __VNCR_START__;
 
@@ -191,6 +191,7 @@  static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u
 
 int kvm_init_nv_sysregs(struct kvm *kvm)
 {
+	u64 res0, res1;
 	int ret = 0;
 
 	mutex_lock(&kvm->arch.config_lock);
@@ -209,6 +210,59 @@  int kvm_init_nv_sysregs(struct kvm *kvm)
 		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
 						       kvm->arch.id_regs[i]);
 
+	/* VTTBR_EL2 */
+	res0 = res1 = 0;
+	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
+		res0 |= GENMASK(63, 56);
+	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
+
+	/* VTCR_EL2 */
+	res0 = GENMASK(63, 32) | GENMASK(30, 20);
+	res1 = BIT(31);
+	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
+
+	/* VMPIDR_EL2 */
+	res0 = GENMASK(63, 40) | GENMASK(30, 24);
+	res1 = BIT(31);
+	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
+
+	/* HCR_EL2 */
+	res0 = BIT(48);
+	res1 = HCR_RW;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
+		res0 |= GENMASK(63, 59);
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
+		res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
+	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
+		res0 |= (HCR_TTLBIS | HCR_TTLBOS);
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
+	    !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
+		res1 = HCR_ENSCXT;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
+		res0 |= (HCR_TID4 | HCR_TICAB | HCR_TOCU);
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
+		res0 |= HCR_AMVOFFEN;
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
+		res0 |= HCR_FIEN;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
+		res0 |= HCR_FWB;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
+		res0 |= HCR_NV2;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
+		res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
+	if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
+	      __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
+		res0 |= (HCR_API | HCR_APK);
+	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
+		res0 |= BIT(39);
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
+		res0 |= (HCR_TERR | HCR_TEA);
+	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
+		res0 |= HCR_TLOR;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
+		res1 |= HCR_E2H;
+	set_sysreg_masks(kvm, HCR_EL2, res0, res1);
+
 out:
 	mutex_unlock(&kvm->arch.config_lock);