
[v4,03/14] KVM: arm64: Kill 32-bit vCPUs on systems with mismatched EL0 support

Message ID 20201124155039.13804-4-will@kernel.org (mailing list archive)
State New, archived
Series An alternative series for asymmetric AArch32 systems

Commit Message

Will Deacon Nov. 24, 2020, 3:50 p.m. UTC
If a vCPU is caught running 32-bit code on a system with mismatched
support at EL0, then we should kill it.

Acked-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/kvm/arm.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

Comments

Marc Zyngier Nov. 27, 2020, 10:26 a.m. UTC | #1
On 2020-11-24 15:50, Will Deacon wrote:
> If a vCPU is caught running 32-bit code on a system with mismatched
> support at EL0, then we should kill it.
> 
> Acked-by: Marc Zyngier <maz@kernel.org>
> Signed-off-by: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/kvm/arm.c | 11 ++++++++++-
>  1 file changed, 10 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 5750ec34960e..d322ac0f4a8e 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -633,6 +633,15 @@ static void check_vcpu_requests(struct kvm_vcpu 
> *vcpu)
>  	}
>  }
> 
> +static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
> +{
> +	if (likely(!vcpu_mode_is_32bit(vcpu)))
> +		return false;
> +
> +	return !system_supports_32bit_el0() ||
> +		static_branch_unlikely(&arm64_mismatched_32bit_el0);
> +}
> +
>  /**
>   * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute 
> guest code
>   * @vcpu:	The VCPU pointer
> @@ -816,7 +825,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
>  		 * with the asymmetric AArch32 case), return to userspace with
>  		 * a fatal error.
>  		 */
> -		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
> +		if (vcpu_mode_is_bad_32bit(vcpu)) {
>  			/*
>  			 * As we have caught the guest red-handed, decide that
>  			 * it isn't fit for purpose anymore by making the vcpu

Given the new definition of system_supports_32bit_el0() in the previous
patch, why do we need this patch at all?

Thanks,

         M.
Will Deacon Nov. 27, 2020, 11:53 a.m. UTC | #2
On Fri, Nov 27, 2020 at 10:26:47AM +0000, Marc Zyngier wrote:
> On 2020-11-24 15:50, Will Deacon wrote:
> > If a vCPU is caught running 32-bit code on a system with mismatched
> > support at EL0, then we should kill it.
> > 
> > Acked-by: Marc Zyngier <maz@kernel.org>
> > Signed-off-by: Will Deacon <will@kernel.org>
> > ---
> >  arch/arm64/kvm/arm.c | 11 ++++++++++-
> >  1 file changed, 10 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 5750ec34960e..d322ac0f4a8e 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -633,6 +633,15 @@ static void check_vcpu_requests(struct kvm_vcpu
> > *vcpu)
> >  	}
> >  }
> > 
> > +static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
> > +{
> > +	if (likely(!vcpu_mode_is_32bit(vcpu)))
> > +		return false;
> > +
> > +	return !system_supports_32bit_el0() ||
> > +		static_branch_unlikely(&arm64_mismatched_32bit_el0);
> > +}
> > +
> >  /**
> >   * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute
> > guest code
> >   * @vcpu:	The VCPU pointer
> > @@ -816,7 +825,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> >  		 * with the asymmetric AArch32 case), return to userspace with
> >  		 * a fatal error.
> >  		 */
> > -		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
> > +		if (vcpu_mode_is_bad_32bit(vcpu)) {
> >  			/*
> >  			 * As we have caught the guest red-handed, decide that
> >  			 * it isn't fit for purpose anymore by making the vcpu
> 
> Given the new definition of system_supports_32bit_el0() in the previous
> patch,
> why do we need this patch at all?

I think the check is still needed, as this is an unusual case where we
want to reject the mismatched system. For example, imagine
'arm64_mismatched_32bit_el0' is true and we're on a mismatched system: in
this case system_supports_32bit_el0() will return 'true' because we
allow 32-bit applications to run, we support the 32-bit personality etc.

However, we still want to terminate 32-bit vCPUs if we spot them in this
situation, so we have to check for:

	!system_supports_32bit_el0() ||
	static_branch_unlikely(&arm64_mismatched_32bit_el0)

so that we only allow 32-bit vCPUs when all of the physical CPUs support
it at EL0.

I could make this clearer either by adding a comment, or avoiding
system_supports_32bit_el0() entirely here and just checking the
sanitised SYS_ID_AA64PFR0_EL1 register directly instead.

What do you prefer?

Will
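
For illustration, a minimal sketch of the second option (checking the
sanitised ID_AA64PFR0_EL1 value directly rather than going through
system_supports_32bit_el0()) might look like the following, assuming the
existing read_sanitised_ftr_reg() and id_aa64pfr0_32bit_el0() helpers; this
is only a sketch of the option under discussion, not a hunk from the series:

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	u64 pfr0;

	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	/* Allow 32-bit only if every physical CPU supports AArch32 at EL0 */
	pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	return !id_aa64pfr0_32bit_el0(pfr0);
}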
Marc Zyngier Nov. 27, 2020, 5:14 p.m. UTC | #3
On 2020-11-27 11:53, Will Deacon wrote:
> On Fri, Nov 27, 2020 at 10:26:47AM +0000, Marc Zyngier wrote:
>> On 2020-11-24 15:50, Will Deacon wrote:
>> > If a vCPU is caught running 32-bit code on a system with mismatched
>> > support at EL0, then we should kill it.
>> >
>> > Acked-by: Marc Zyngier <maz@kernel.org>
>> > Signed-off-by: Will Deacon <will@kernel.org>
>> > ---
>> >  arch/arm64/kvm/arm.c | 11 ++++++++++-
>> >  1 file changed, 10 insertions(+), 1 deletion(-)
>> >
>> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
>> > index 5750ec34960e..d322ac0f4a8e 100644
>> > --- a/arch/arm64/kvm/arm.c
>> > +++ b/arch/arm64/kvm/arm.c
>> > @@ -633,6 +633,15 @@ static void check_vcpu_requests(struct kvm_vcpu
>> > *vcpu)
>> >  	}
>> >  }
>> >
>> > +static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
>> > +{
>> > +	if (likely(!vcpu_mode_is_32bit(vcpu)))
>> > +		return false;
>> > +
>> > +	return !system_supports_32bit_el0() ||
>> > +		static_branch_unlikely(&arm64_mismatched_32bit_el0);
>> > +}
>> > +
>> >  /**
>> >   * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute
>> > guest code
>> >   * @vcpu:	The VCPU pointer
>> > @@ -816,7 +825,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
>> >  		 * with the asymmetric AArch32 case), return to userspace with
>> >  		 * a fatal error.
>> >  		 */
>> > -		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
>> > +		if (vcpu_mode_is_bad_32bit(vcpu)) {
>> >  			/*
>> >  			 * As we have caught the guest red-handed, decide that
>> >  			 * it isn't fit for purpose anymore by making the vcpu
>> 
>> Given the new definition of system_supports_32bit_el0() in the 
>> previous
>> patch,
>> why do we need this patch at all?
> 
> I think the check is still needed, as this is an unusual case where we
> want to reject the mismatched system. For example, imagine
> 'arm64_mismatched_32bit_el0' is true and we're on a mismatched system: 
> in
> this case system_supports_32bit_el0() will return 'true' because we
> allow 32-bit applications to run, we support the 32-bit personality 
> etc.
> 
> However, we still want to terminate 32-bit vCPUs if we spot them in 
> this
> situation, so we have to check for:
> 
> 	!system_supports_32bit_el0() ||
> 	static_branch_unlikely(&arm64_mismatched_32bit_el0)
> 
> so that we only allow 32-bit vCPUs when all of the physical CPUs 
> support
> it at EL0.
> 
> I could make this clearer either by adding a comment, or avoiding
> system_supports_32bit_el0() entirely here and just checking the
> sanitised SYS_ID_AA64PFR0_EL1 register directly instead.
> 
> What do you prefer?

Yeah, the sanitized read feels better, if only because that is
what we are going to read in all the valid cases, unfortunately.
read_sanitised_ftr_reg() is sadly not designed to be called on
a fast path, meaning that 32bit guests will do a bsearch() on
the ID-regs every time they exit...

I guess we will have to evaluate how much we lose with this.

Thanks,

         M.
Quentin Perret Nov. 27, 2020, 5:24 p.m. UTC | #4
On Friday 27 Nov 2020 at 17:14:11 (+0000), Marc Zyngier wrote:
> On 2020-11-27 11:53, Will Deacon wrote:
> > On Fri, Nov 27, 2020 at 10:26:47AM +0000, Marc Zyngier wrote:
> > > On 2020-11-24 15:50, Will Deacon wrote:
> > > > If a vCPU is caught running 32-bit code on a system with mismatched
> > > > support at EL0, then we should kill it.
> > > >
> > > > Acked-by: Marc Zyngier <maz@kernel.org>
> > > > Signed-off-by: Will Deacon <will@kernel.org>
> > > > ---
> > > >  arch/arm64/kvm/arm.c | 11 ++++++++++-
> > > >  1 file changed, 10 insertions(+), 1 deletion(-)
> > > >
> > > > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > > > index 5750ec34960e..d322ac0f4a8e 100644
> > > > --- a/arch/arm64/kvm/arm.c
> > > > +++ b/arch/arm64/kvm/arm.c
> > > > @@ -633,6 +633,15 @@ static void check_vcpu_requests(struct kvm_vcpu
> > > > *vcpu)
> > > >  	}
> > > >  }
> > > >
> > > > +static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
> > > > +{
> > > > +	if (likely(!vcpu_mode_is_32bit(vcpu)))
> > > > +		return false;
> > > > +
> > > > +	return !system_supports_32bit_el0() ||
> > > > +		static_branch_unlikely(&arm64_mismatched_32bit_el0);
> > > > +}
> > > > +
> > > >  /**
> > > >   * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute
> > > > guest code
> > > >   * @vcpu:	The VCPU pointer
> > > > @@ -816,7 +825,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> > > >  		 * with the asymmetric AArch32 case), return to userspace with
> > > >  		 * a fatal error.
> > > >  		 */
> > > > -		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
> > > > +		if (vcpu_mode_is_bad_32bit(vcpu)) {
> > > >  			/*
> > > >  			 * As we have caught the guest red-handed, decide that
> > > >  			 * it isn't fit for purpose anymore by making the vcpu
> > > 
> > > Given the new definition of system_supports_32bit_el0() in the
> > > previous
> > > patch,
> > > why do we need this patch at all?
> > 
> > I think the check is still needed, as this is an unusual case where we
> > want to reject the mismatched system. For example, imagine
> > 'arm64_mismatched_32bit_el0' is true and we're on a mismatched system:
> > in
> > this case system_supports_32bit_el0() will return 'true' because we
> > allow 32-bit applications to run, we support the 32-bit personality etc.
> > 
> > However, we still want to terminate 32-bit vCPUs if we spot them in this
> > situation, so we have to check for:
> > 
> > 	!system_supports_32bit_el0() ||
> > 	static_branch_unlikely(&arm64_mismatched_32bit_el0)
> > 
> > so that we only allow 32-bit vCPUs when all of the physical CPUs support
> > it at EL0.
> > 
> > I could make this clearer either by adding a comment, or avoiding
> > system_supports_32bit_el0() entirely here and just checking the
> > sanitised SYS_ID_AA64PFR0_EL1 register directly instead.
> > 
> > What do you prefer?
> 
> Yeah, the sanitized read feels better, if only because that is
> what we are going to read in all the valid cases, unfortunately.
> read_sanitised_ftr_reg() is sadly not designed to be called on
> a fast path, meaning that 32bit guests will do a bsearch() on
> the ID-regs every time they exit...
> 
> I guess we will have to evaluate how much we lose with this.

Could we use the trick we have for arm64_ftr_reg_ctrel0 to speed this
up?

Thanks,
Quentin
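
For reference, the arm64_ftr_reg_ctrel0 trick Quentin refers to is (roughly)
that SYS_CTR_EL0 has a named struct arm64_ftr_reg definition which fast paths
can read directly, skipping the bsearch() in read_sanitised_ftr_reg(). Applied
to ID_AA64PFR0_EL1 it might look like the sketch below; the symbol name
arm64_ftr_reg_id_aa64pfr0_el1 is made up purely for illustration:

/* arch/arm64/include/asm/cpufeature.h */
extern struct arm64_ftr_reg arm64_ftr_reg_id_aa64pfr0_el1;

/* arch/arm64/kernel/cpufeature.c */
struct arm64_ftr_reg arm64_ftr_reg_id_aa64pfr0_el1 = {
	.name		= "SYS_ID_AA64PFR0_EL1",
	.ftr_bits	= ftr_id_aa64pfr0,
};

/* ... with the arm64_ftr_regs[] entry pointing at it: */
	{ SYS_ID_AA64PFR0_EL1, &arm64_ftr_reg_id_aa64pfr0_el1 },

/* so that a fast path can read the sanitised value directly: */
	u64 pfr0 = arm64_ftr_reg_id_aa64pfr0_el1.sys_val;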
Marc Zyngier Nov. 27, 2020, 6:16 p.m. UTC | #5
On 2020-11-27 17:24, Quentin Perret wrote:
> On Friday 27 Nov 2020 at 17:14:11 (+0000), Marc Zyngier wrote:

[...]

>> Yeah, the sanitized read feels better, if only because that is
>> what we are going to read in all the valid cases, unfortunately.
>> read_sanitised_ftr_reg() is sadly not designed to be called on
>> a fast path, meaning that 32bit guests will do a bsearch() on
>> the ID-regs every time they exit...
>> 
>> I guess we will have to evaluate how much we lose with this.
> 
> Could we use the trick we have for arm64_ftr_reg_ctrel0 to speed this
> up?

Maybe. I want to first verify whether this has any measurable impact.
Another possibility would be to cache the last read_sanitised_ftr_reg()
access, just to see if that helps. There shouldn't be that many code
paths hammering it.

Thanks,

         M.
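
A single-entry cache along those lines might look roughly like the sketch
below, reusing the arm64_ftr_regs table and search_cmp_ftr_reg() comparator
that cpufeature.c already uses for the bsearch(); the caching itself is purely
illustrative and sidesteps the question of whether it measurably helps:

u64 read_sanitised_ftr_reg(u32 id)
{
	/* Remember the last successful lookup to skip the bsearch() */
	static const struct __ftr_reg_entry *cache;
	const struct __ftr_reg_entry *e = READ_ONCE(cache);

	if (!e || e->sys_id != id) {
		e = bsearch((const void *)(unsigned long)id, arm64_ftr_regs,
			    ARRAY_SIZE(arm64_ftr_regs),
			    sizeof(arm64_ftr_regs[0]), search_cmp_ftr_reg);
		if (WARN_ON(!e))
			return 0;
		WRITE_ONCE(cache, e);
	}

	return e->reg->sys_val;
}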
Will Deacon Dec. 1, 2020, 4:57 p.m. UTC | #6
On Fri, Nov 27, 2020 at 06:16:35PM +0000, Marc Zyngier wrote:
> On 2020-11-27 17:24, Quentin Perret wrote:
> > On Friday 27 Nov 2020 at 17:14:11 (+0000), Marc Zyngier wrote:
> 
> [...]
> 
> > > Yeah, the sanitized read feels better, if only because that is
> > > what we are going to read in all the valid cases, unfortunately.
> > > read_sanitised_ftr_reg() is sadly not designed to be called on
> > > a fast path, meaning that 32bit guests will do a bsearch() on
> > > the ID-regs every time they exit...
> > > 
> > > I guess we will have to evaluate how much we lose with this.
> > 
> > Could we use the trick we have for arm64_ftr_reg_ctrel0 to speed this
> > up?
> 
> Maybe. I want to first verify whether this has any measurable impact.
> Another possibility would be to cache the last read_sanitised_ftr_reg()
> access, just to see if that helps. There shouldn't be that many code
> paths hammering it.

We don't have huge numbers of ID registers, so the bsearch shouldn't be
too expensive. However, I'd like to remind myself why we can't index into
the feature register array directly as we _should_ know all of this stuff
at compile time, right?

Will
Marc Zyngier Dec. 2, 2020, 8:18 a.m. UTC | #7
On 2020-12-01 16:57, Will Deacon wrote:
> On Fri, Nov 27, 2020 at 06:16:35PM +0000, Marc Zyngier wrote:
>> On 2020-11-27 17:24, Quentin Perret wrote:
>> > On Friday 27 Nov 2020 at 17:14:11 (+0000), Marc Zyngier wrote:
>> 
>> [...]
>> 
>> > > Yeah, the sanitized read feels better, if only because that is
>> > > what we are going to read in all the valid cases, unfortunately.
>> > > read_sanitised_ftr_reg() is sadly not designed to be called on
>> > > a fast path, meaning that 32bit guests will do a bsearch() on
>> > > the ID-regs every time they exit...
>> > >
>> > > I guess we will have to evaluate how much we lose with this.
>> >
>> > Could we use the trick we have for arm64_ftr_reg_ctrel0 to speed this
>> > up?
>> 
>> Maybe. I want to first verify whether this has any measurable impact.
>> Another possibility would be to cache the last 
>> read_sanitised_ftr_reg()
>> access, just to see if that helps. There shouldn't be that many code
>> paths hammering it.
> 
> We don't have huge numbers of ID registers, so the bsearch shouldn't be
> too expensive. However, I'd like to remind myself why we can't index 
> into
> the feature register array directly as we _should_ know all of this 
> stuff
> at compile time, right?

Simply because it's not indexed by ID reg. It's just an ordered collection,
similar to the one used for sys_reg emulation in KVM. You can compute the
index ahead of time, but just not at compile time. At least not with the
way the arm64_ftr_regs array is built.

         M.
Will Deacon Dec. 2, 2020, 5:27 p.m. UTC | #8
On Wed, Dec 02, 2020 at 08:18:03AM +0000, Marc Zyngier wrote:
> On 2020-12-01 16:57, Will Deacon wrote:
> > On Fri, Nov 27, 2020 at 06:16:35PM +0000, Marc Zyngier wrote:
> > > On 2020-11-27 17:24, Quentin Perret wrote:
> > > > On Friday 27 Nov 2020 at 17:14:11 (+0000), Marc Zyngier wrote:
> > > 
> > > [...]
> > > 
> > > > > Yeah, the sanitized read feels better, if only because that is
> > > > > what we are going to read in all the valid cases, unfortunately.
> > > > > read_sanitised_ftr_reg() is sadly not designed to be called on
> > > > > a fast path, meaning that 32bit guests will do a bsearch() on
> > > > > the ID-regs every time they exit...
> > > > >
> > > > > I guess we will have to evaluate how much we lose with this.
> > > >
> > > > Could we use the trick we have for arm64_ftr_reg_ctrel0 to speed this
> > > > up?
> > > 
> > > Maybe. I want to first verify whether this has any measurable impact.
> > > Another possibility would be to cache the last
> > > read_sanitised_ftr_reg()
> > > access, just to see if that helps. There shouldn't be that many code
> > > paths hammering it.
> > 
> > We don't have huge numbers of ID registers, so the bsearch shouldn't be
> > too expensive. However, I'd like to remind myself why we can't index
> > into
> > the feature register array directly as we _should_ know all of this
> > stuff
> > at compile time, right?
> 
> Simply because it's not indexed by ID reg. It's just an ordered collection,
> similar to the one used for sys_reg emulation in KVM. You can compute the index
> ahead of time, but just not at compile time. At least not with the
> way the arm64_ftr_regs array is built.

FWIW, if your testing shows that the bsearch() is costing us, I've hacked
up an interface to access the ID registers directly (see below) which I
can include with this series.

Will

--->8

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index da250e4741bd..23766104d756 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -599,7 +599,49 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
 void __init setup_cpu_features(void);
 void check_local_cpu_capabilities(void);
 
+#define ARM64_FTR_REG2IDX(id)	id ## _IDX
+enum arm64_ftr_reg_idx {
+	ARM64_FTR_REG2IDX(SYS_ID_PFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_PFR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_DFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_MMFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_MMFR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_MMFR2_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_MMFR3_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_ISAR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_ISAR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_ISAR2_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_ISAR3_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_ISAR4_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_ISAR5_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_MMFR4_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_ISAR6_EL1),
+	ARM64_FTR_REG2IDX(SYS_MVFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_MVFR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_MVFR2_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_PFR2_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_DFR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_MMFR5_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64PFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64PFR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64ZFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64DFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64DFR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64ISAR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64ISAR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64MMFR0_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64MMFR1_EL1),
+	ARM64_FTR_REG2IDX(SYS_ID_AA64MMFR2_EL1),
+	ARM64_FTR_REG2IDX(SYS_ZCR_EL1),
+	ARM64_FTR_REG2IDX(SYS_CTR_EL0),
+	ARM64_FTR_REG2IDX(SYS_DCZID_EL0),
+	ARM64_FTR_REG2IDX(SYS_CNTFRQ_EL0),
+
+	ARM64_FTR_REG_IDX_MAX,
+};
+
 u64 read_sanitised_ftr_reg(u32 id);
+u64 read_sanitised_ftr_reg_by_idx(enum arm64_ftr_reg_idx idx);
 
 static inline bool cpu_supports_mixed_endian_el0(void)
 {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f36c4f62f69..05223352db5d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -546,16 +546,18 @@ static const struct arm64_ftr_bits ftr_raz[] = {
 	ARM64_FTR_END,
 };
 
-#define ARM64_FTR_REG(id, table) {		\
-	.sys_id = id,				\
-	.reg = 	&(struct arm64_ftr_reg){	\
-		.name = #id,			\
-		.ftr_bits = &((table)[0]),	\
-	}}
+#define ARM64_FTR_REG(id, table)					\
+	[id ## _IDX] = {							\
+		.sys_id = id,						\
+		.reg	= &(struct arm64_ftr_reg) {			\
+			.name		= #id,				\
+			.ftr_bits	= &((table)[0]),		\
+		}							\
+	}
 
 static const struct __ftr_reg_entry {
 	u32			sys_id;
-	struct arm64_ftr_reg 	*reg;
+	struct arm64_ftr_reg	*reg;
 } arm64_ftr_regs[] = {
 
 	/* Op1 = 0, CRn = 0, CRm = 1 */
@@ -607,7 +609,7 @@ static const struct __ftr_reg_entry {
 	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
 
 	/* Op1 = 3, CRn = 0, CRm = 0 */
-	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
+	[ARM64_FTR_REG2IDX(SYS_CTR_EL0)] = { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
 	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
 
 	/* Op1 = 3, CRn = 14, CRm = 0 */
@@ -1116,6 +1118,18 @@ u64 read_sanitised_ftr_reg(u32 id)
 }
 EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
 
+u64 read_sanitised_ftr_reg_by_idx(enum arm64_ftr_reg_idx idx)
+{
+	struct arm64_ftr_reg *regp;
+
+	if (WARN_ON((unsigned)idx >= ARM64_FTR_REG_IDX_MAX))
+		return 0;
+
+	regp = arm64_ftr_regs[idx].reg;
+	return regp->sys_val;
+}
+EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg_by_idx);
+
 #define read_sysreg_case(r)	\
 	case r:		return read_sysreg_s(r)
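
If an interface like this went in, the KVM-side check could plausibly be
written against it as in the sketch below, where SYS_ID_AA64PFR0_EL1_IDX is
the name the ARM64_FTR_REG2IDX() macro above would generate and
id_aa64pfr0_32bit_el0() is the existing cpufeature helper:

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	u64 pfr0;

	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	/* Direct array index, no bsearch() on the guest-exit path */
	pfr0 = read_sanitised_ftr_reg_by_idx(SYS_ID_AA64PFR0_EL1_IDX);
	return !id_aa64pfr0_32bit_el0(pfr0);
}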

Patch

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 5750ec34960e..d322ac0f4a8e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -633,6 +633,15 @@  static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 	}
 }
 
+static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
+{
+	if (likely(!vcpu_mode_is_32bit(vcpu)))
+		return false;
+
+	return !system_supports_32bit_el0() ||
+		static_branch_unlikely(&arm64_mismatched_32bit_el0);
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:	The VCPU pointer
@@ -816,7 +825,7 @@  int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * with the asymmetric AArch32 case), return to userspace with
 		 * a fatal error.
 		 */
-		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+		if (vcpu_mode_is_bad_32bit(vcpu)) {
 			/*
 			 * As we have caught the guest red-handed, decide that
 			 * it isn't fit for purpose anymore by making the vcpu