
[v2,03/25] KVM: arm64: nv: Add sanitising to VNCR-backed sysregs

Message ID 20240130204533.693853-4-maz@kernel.org
State New, archived
Series KVM/arm64: VM configuration enforcement

Commit Message

Marc Zyngier Jan. 30, 2024, 8:45 p.m. UTC
VNCR-backed "registers" are actually only memory, which means that
there is zero control over what the guest can write, and that it
is the hypervisor's job to sanitise the content of the backing
store. Yeah, this is fun.

In order to preserve some form of sanity, add a repainting mechanism
that makes use of a per-VM set of RES0/RES1 masks, one pair per VNCR
register. These masks get applied on access to the backing store via
__vcpu_sys_reg(), ensuring that the state that is consumed by KVM is
correct.

So far, nothing populates these masks, but stay tuned.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h | 22 ++++++++++++++++-
 arch/arm64/kvm/arm.c              |  1 +
 arch/arm64/kvm/nested.c           | 41 ++++++++++++++++++++++++++++++-
 3 files changed, 62 insertions(+), 2 deletions(-)
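
To make the repainting concrete: on every __vcpu_sys_reg() access to a
VNCR-backed register, the stored value is first rewritten so that RES0
bits read as zero and RES1 bits read as one. A minimal stand-alone
sketch of that operation (the value and masks below are invented for
illustration; only the bitwise logic mirrors the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Repaint a raw value from the VNCR backing store with a RES0/RES1
 * mask pair: RES0 bits are forced to 0, RES1 bits are forced to 1.
 * This is the core of kvm_vcpu_sanitise_vncr_reg() in the patch.
 */
static uint64_t repaint(uint64_t v, uint64_t res0, uint64_t res1)
{
	v &= ~res0;
	v |= res1;
	return v;
}

int main(void)
{
	/* Invented example: the guest scribbled over the backing page */
	uint64_t guest = 0xdeadbeefcafef00dULL;
	uint64_t res0  = 0xffff000000000000ULL;	/* bits [63:48] RES0 */
	uint64_t res1  = 0x1ULL;			/* bit 0 RES1 */

	printf("%#llx\n", (unsigned long long)repaint(guest, res0, res1));
	return 0;
}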

Comments

Joey Gouly Jan. 31, 2024, 2:52 p.m. UTC | #1
Hello,

On Tue, Jan 30, 2024 at 08:45:10PM +0000, Marc Zyngier wrote:
> [...]
> 
> -#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
> +u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
> +#define __vcpu_sys_reg(v,r)						\
> +	(*({								\
> +		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
> +		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
> +		if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)		\
> +			*__r = kvm_vcpu_sanitise_vncr_reg((v), (r));	\
> +		__r;							\
> +	}))
> [...]

This uses the suggestion from v1 to use vcpu_has_nv() and collapse the macros.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>

Thanks,
Joey
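
One subtlety of the collapsed macro: the statement expression sanitises
the backing store in place and then yields a pointer, which the outer
dereference makes usable in both read and write positions. A write thus
lands unsanitised and is only repainted on the next access. A sketch of
both usage forms (VNCR_SOME_EL2 is an invented register name, assumed
to sit at or above __VNCR_START__):

	/* Read: the backing store is repainted, then the value is read */
	u64 val = __vcpu_sys_reg(vcpu, VNCR_SOME_EL2);

	/* Write: the raw value is stored as-is; it will be repainted by
	 * the RES0/RES1 masks on the next __vcpu_sys_reg() access. */
	__vcpu_sys_reg(vcpu, VNCR_SOME_EL2) = val;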

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c0cf9c5f5e8d..816288e2d735 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -238,6 +238,8 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
 	return index;
 }
 
+struct kvm_sysreg_masks;
+
 struct kvm_arch {
 	struct kvm_s2_mmu mmu;
 
@@ -312,6 +314,9 @@ struct kvm_arch {
 #define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
 	u64 id_regs[KVM_ARM_ID_REG_NUM];
 
+	/* Masks for VNCR-backed sysregs */
+	struct kvm_sysreg_masks	*sysreg_masks;
+
 	/*
 	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
 	 * the associated pKVM instance in the hypervisor.
@@ -474,6 +479,13 @@ enum vcpu_sysreg {
 	NR_SYS_REGS	/* Nothing after this line! */
 };
 
+struct kvm_sysreg_masks {
+	struct {
+		u64	res0;
+		u64	res1;
+	} mask[NR_SYS_REGS - __VNCR_START__];
+};
+
 struct kvm_cpu_context {
 	struct user_pt_regs regs;	/* sp = sp_el0 */
 
@@ -868,7 +880,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 
 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
 
-#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+#define __vcpu_sys_reg(v,r)						\
+	(*({								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
+		if (vcpu_has_nv((v)) && (r) >= __VNCR_START__)		\
+			*__r = kvm_vcpu_sanitise_vncr_reg((v), (r));	\
+		__r;							\
+	}))
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a25265aca432..c063e84fc72c 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -206,6 +206,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		pkvm_destroy_hyp_vm(kvm);
 
 	kfree(kvm->arch.mpidr_data);
+	kfree(kvm->arch.sysreg_masks);
 	kvm_destroy_vcpus(kvm);
 
 	kvm_unshare_hyp(kvm, kvm + 1);
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index d55e809e26cb..c976cd4b8379 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -163,15 +163,54 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
 
 	return val;
 }
+
+u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
+{
+	u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
+	struct kvm_sysreg_masks *masks;
+
+	masks = vcpu->kvm->arch.sysreg_masks;
+
+	if (masks) {
+		sr -= __VNCR_START__;
+
+		v &= ~masks->mask[sr].res0;
+		v |= masks->mask[sr].res1;
+	}
+
+	return v;
+}
+
+static void __maybe_unused set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+{
+	int i = sr - __VNCR_START__;
+
+	kvm->arch.sysreg_masks->mask[i].res0 = res0;
+	kvm->arch.sysreg_masks->mask[i].res1 = res1;
+}
+
 int kvm_init_nv_sysregs(struct kvm *kvm)
 {
+	int ret = 0;
+
 	mutex_lock(&kvm->arch.config_lock);
 
+	if (kvm->arch.sysreg_masks)
+		goto out;
+
+	kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
+					 GFP_KERNEL);
+	if (!kvm->arch.sysreg_masks) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
 		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
 						       kvm->arch.id_regs[i]);
 
+out:
 	mutex_unlock(&kvm->arch.config_lock);
 
-	return 0;
+	return ret;
 }
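
Note that set_sysreg_masks() is deliberately __maybe_unused at this
point: nothing populates the masks yet ("stay tuned"). A hedged sketch
of what a later caller could look like (VNCR_EXAMPLE_EL2 and its bit
layout are invented for illustration, not taken from the series):

	/*
	 * Hypothetical: compute the RES0/RES1 masks for one VNCR-backed
	 * register from the VM's feature configuration and record them.
	 */
	static void example_set_masks(struct kvm *kvm)
	{
		u64 res0 = GENMASK(63, 32);	/* upper half unimplemented */
		u64 res1 = BIT(0);		/* bit 0 reads as one */

		set_sysreg_masks(kvm, VNCR_EXAMPLE_EL2, res0, res1);
	}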