
[01/13] KVM: arm64: Harden __ctxt_sys_reg() against out-of-range values

Message ID 20240219092014.783809-2-maz@kernel.org (mailing list archive)
State New, archived
Series KVM/arm64: Add NV support for ERET and PAuth

Commit Message

Marc Zyngier Feb. 19, 2024, 9:20 a.m. UTC
The unsuspecting kernel tinkerer can be easily confused into
writing something that looks like this:

	ikey.lo = __vcpu_sys_reg(vcpu, SYS_APIAKEYLO_EL1);

which seems vaguely sensible, until you realise that the second
parameter is the encoding of a sysreg, and not the index into
the vcpu sysreg file... Debugging what happens in this case is
an interesting exercise in head<->wall interactions.

As they often say: "Any resemblance to actual persons, living
or dead, or actual events is purely coincidental".

In order to save people's time, add some compile-time hardening
that will at least weed out the "stupidly out of range" values.
This will *not* catch anything that isn't a compile-time constant.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
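
A minimal sketch of the two call shapes involved, assuming APIAKEYLO_EL1 is the
corresponding enum vcpu_sysreg index:

	/* Intended usage: the second argument is an index into the vcpu sysreg file. */
	ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1);

	/*
	 * The mistake: SYS_APIAKEYLO_EL1 is the architectural encoding of the
	 * register, a constant far larger than NR_SYS_REGS. Since
	 * __vcpu_sys_reg() expands through __ctxt_sys_reg(), the new
	 * BUILD_BUG_ON(__builtin_constant_p(r) && (r) >= NR_SYS_REGS) sees the
	 * constant at the call site and the build fails, instead of silently
	 * indexing past the end of sys_regs[].
	 */
	ikey.lo = __vcpu_sys_reg(vcpu, SYS_APIAKEYLO_EL1);	/* now a build error */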

Comments

Joey Gouly Feb. 20, 2024, 11:20 a.m. UTC | #1
On Mon, Feb 19, 2024 at 09:20:02AM +0000, Marc Zyngier wrote:
> The unsuspecting kernel tinkerer can be easily confused into
> writing something that looks like this:
> 
> 	ikey.lo = __vcpu_sys_reg(vcpu, SYS_APIAKEYLO_EL1);
> 
> which seems vaguely sensible, until you realise that the second
> parameter is the encoding of a sysreg, and not the index into
> the vcpu sysreg file... Debugging what happens in this case is

type safety :(

> an interesting exercise in head<->wall interactions.
> 
> As they often say: "Any resemblance to actual persons, living
> or dead, or actual events is purely coincidental".
> 
> In order to save people's time, add some compile-time hardening
> that will at least weed out the "stupidly out of range" values.
> This will *not* catch anything that isn't a compile-time constant.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_host.h | 9 ++++++++-
>  1 file changed, 8 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 181fef12e8e8..a5ec4c7d3966 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -895,7 +895,7 @@ struct kvm_vcpu_arch {
>   * Don't bother with VNCR-based accesses in the nVHE code, it has no
>   * business dealing with NV.
>   */
> -static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> +static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)

When in doubt, add more underscores!

>  {
>  #if !defined (__KVM_NVHE_HYPERVISOR__)
>  	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
> @@ -905,6 +905,13 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
>  	return (u64 *)&ctxt->sys_regs[r];
>  }
>  
> +#define __ctxt_sys_reg(c,r)						\
> +	({								\
> +	    	BUILD_BUG_ON(__builtin_constant_p(r) &&			\
> +			     (r) >= NR_SYS_REGS);			\
> +		___ctxt_sys_reg(c, r);					\
> +	})

I'm assuming the extra macro layer is to try to make __builtin_constant_p() as
effective as possible? Otherwise maybe it relies on the compiler inlining the
___ctxt_sys_reg() function?

> +
>  #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
>  
>  u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);

Thanks,
Joey
Marc Zyngier Feb. 20, 2024, 11:57 a.m. UTC | #2
On Tue, 20 Feb 2024 11:20:31 +0000,
Joey Gouly <joey.gouly@arm.com> wrote:
> 
> On Mon, Feb 19, 2024 at 09:20:02AM +0000, Marc Zyngier wrote:
> > The unsuspecting kernel tinkerer can be easily confused into
> > writing something that looks like this:
> > 
> > 	ikey.lo = __vcpu_sys_reg(vcpu, SYS_APIAKEYLO_EL1);
> > 
> > which seems vaguely sensible, until you realise that the second
> > parameter is the encoding of a sysreg, and not the index into
> > the vcpu sysreg file... Debugging what happens in this case is
> 
> type safety :(

Are you advocating for making everything a struct? Or something else?

> 
> > an interesting exercise in head<->wall interactions.
> > 
> > As they often say: "Any resemblance to actual persons, living
> > or dead, or actual events is purely coincidental".
> > 
> > In order to save people's time, add some compile-time hardening
> > that will at least weed out the "stupidly out of range" values.
> > This will *not* catch anything that isn't a compile-time constant.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/include/asm/kvm_host.h | 9 ++++++++-
> >  1 file changed, 8 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index 181fef12e8e8..a5ec4c7d3966 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -895,7 +895,7 @@ struct kvm_vcpu_arch {
> >   * Don't bother with VNCR-based accesses in the nVHE code, it has no
> >   * business dealing with NV.
> >   */
> > -static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> > +static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> 
> When in doubt, add more underscores!

That's the one true way.

> 
> >  {
> >  #if !defined (__KVM_NVHE_HYPERVISOR__)
> >  	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
> > @@ -905,6 +905,13 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> >  	return (u64 *)&ctxt->sys_regs[r];
> >  }
> >  
> > +#define __ctxt_sys_reg(c,r)						\
> > +	({								\
> > +	    	BUILD_BUG_ON(__builtin_constant_p(r) &&			\
> > +			     (r) >= NR_SYS_REGS);			\
> > +		___ctxt_sys_reg(c, r);					\
> > +	})
> 
> I'm assuming the extra macro layer is to try to make __builtin_constant_p() as
> effective as possible? Otherwise maybe it relies on the compiler inlining the
> ___ctxt_sys_reg() function?

It's not about efficiency. It's about making it *work*. Otherwise,
lack of inlining will screw you over, and you may not check anything.

Thanks,

	M.
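
A hedged sketch of the alternative being warned against here, for contrast
(the name __ctxt_sys_reg_checked is hypothetical):

	/* Hypothetical alternative, NOT what the patch does: */
	static inline u64 *__ctxt_sys_reg_checked(const struct kvm_cpu_context *ctxt,
						  int r)
	{
		/*
		 * If the compiler chooses not to inline this function, or not to
		 * constant-propagate the caller's argument into it, r is just a
		 * variable here: __builtin_constant_p(r) evaluates to 0 and the
		 * check degenerates to BUILD_BUG_ON(0), verifying nothing.
		 */
		BUILD_BUG_ON(__builtin_constant_p(r) && (r) >= NR_SYS_REGS);
		return ___ctxt_sys_reg(ctxt, r);
	}

The macro in the patch avoids this by expanding the BUILD_BUG_ON() at every
call site, where the literal argument is still visible to
__builtin_constant_p().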
Joey Gouly Feb. 20, 2024, 1:17 p.m. UTC | #3
On Tue, Feb 20, 2024 at 11:57:04AM +0000, Marc Zyngier wrote:
> On Tue, 20 Feb 2024 11:20:31 +0000,
> Joey Gouly <joey.gouly@arm.com> wrote:
> > 
> > On Mon, Feb 19, 2024 at 09:20:02AM +0000, Marc Zyngier wrote:
> > > The unsuspecting kernel tinkerer can be easily confused into
> > > writing something that looks like this:
> > > 
> > > 	ikey.lo = __vcpu_sys_reg(vcpu, SYS_APIAKEYLO_EL1);
> > > 
> > > which seems vaguely sensible, until you realise that the second
> > > parameter is the encoding of a sysreg, and not the index into
> > > the vcpu sysreg file... Debugging what happens in this case is
> > 
> > type safety :(
> 
> Are you advocating for making everything a struct? Or something else?

No, merely lamenting the situation.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>

> 
> > 
> > > an interesting exercise in head<->wall interactions.
> > > 
> > > As they often say: "Any resemblance to actual persons, living
> > > or dead, or actual events is purely coincidental".
> > > 
> > > In order to save people's time, add some compile-time hardening
> > > that will at least weed out the "stupidly out of range" values.
> > > This will *not* catch anything that isn't a compile-time constant.
> > > 
> > > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > > ---
> > >  arch/arm64/include/asm/kvm_host.h | 9 ++++++++-
> > >  1 file changed, 8 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > > index 181fef12e8e8..a5ec4c7d3966 100644
> > > --- a/arch/arm64/include/asm/kvm_host.h
> > > +++ b/arch/arm64/include/asm/kvm_host.h
> > > @@ -895,7 +895,7 @@ struct kvm_vcpu_arch {
> > >   * Don't bother with VNCR-based accesses in the nVHE code, it has no
> > >   * business dealing with NV.
> > >   */
> > > -static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> > > +static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> > 
> > When in doubt, add more underscores!
> 
> That's the one true way.
> 
> > 
> > >  {
> > >  #if !defined (__KVM_NVHE_HYPERVISOR__)
> > >  	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
> > > @@ -905,6 +905,13 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> > >  	return (u64 *)&ctxt->sys_regs[r];
> > >  }
> > >  
> > > +#define __ctxt_sys_reg(c,r)						\
> > > +	({								\
> > > +	    	BUILD_BUG_ON(__builtin_constant_p(r) &&			\
> > > +			     (r) >= NR_SYS_REGS);			\
> > > +		___ctxt_sys_reg(c, r);					\
> > > +	})
> > 
> > I'm assuming the extra macro layer is to try to make __builtin_constant_p() as
> > effective as possible? Otherwise maybe it relies on the compiler inlining the
> > ___ctxt_sys_reg() function?
> 
> It's not about efficiency. It's about making it *work*. Otherwise,
> lack of inlining will screw you over, and you may not check anything.

Thanks,
Joey
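
For completeness, a purely illustrative sketch of the struct-wrapper idea
floated above; the names (vcpu_reg_idx, VCPU_REG, __ctxt_sys_reg_typed) are
hypothetical and this is not what the patch does:

	/*
	 * Wrap the index in a single-member struct so that passing a raw
	 * sysreg encoding becomes a type error instead of a silent
	 * out-of-bounds index.
	 */
	struct vcpu_reg_idx {
		int idx;
	};
	#define VCPU_REG(n)	((struct vcpu_reg_idx){ .idx = (n) })

	static inline u64 *__ctxt_sys_reg_typed(const struct kvm_cpu_context *ctxt,
						struct vcpu_reg_idx r)
	{
		return (u64 *)&ctxt->sys_regs[r.idx];
	}

	/*
	 * __ctxt_sys_reg_typed(ctxt, SYS_APIAKEYLO_EL1) no longer compiles;
	 * callers have to spell out __ctxt_sys_reg_typed(ctxt, VCPU_REG(APIAKEYLO_EL1)).
	 */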

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 181fef12e8e8..a5ec4c7d3966 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -895,7 +895,7 @@  struct kvm_vcpu_arch {
  * Don't bother with VNCR-based accesses in the nVHE code, it has no
  * business dealing with NV.
  */
-static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
+static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 {
 #if !defined (__KVM_NVHE_HYPERVISOR__)
 	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
@@ -905,6 +905,13 @@  static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 	return (u64 *)&ctxt->sys_regs[r];
 }
 
+#define __ctxt_sys_reg(c,r)						\
+	({								\
+	    	BUILD_BUG_ON(__builtin_constant_p(r) &&			\
+			     (r) >= NR_SYS_REGS);			\
+		___ctxt_sys_reg(c, r);					\
+	})
+
 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
 
 u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);