[v7,17/27] KVM: arm64: Reject ioctl access to FPSIMD V-regs on SVE vcpus

Message ID 1553864452-15080-18-git-send-email-Dave.Martin@arm.com (mailing list archive)
State New, archived
Series KVM: arm64: SVE guest support

Commit Message

Dave Martin March 29, 2019, 1 p.m. UTC
In order to avoid the pointless complexity of maintaining two ioctl
register access views of the same data, this patch blocks ioctl
access to the FPSIMD V-registers on vcpus that support SVE.

This will make it more straightforward to add SVE register access
support.

Since SVE is an opt-in feature for userspace, this will not affect
existing users.

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>

---

Changes since v5:

 * Refactored to cope with the removal of core_reg_size_from_offset()
   (which was added by another series that will now be handled
   independently).

   This leaves some duplication in that we still filter the V-regs out
   in two places, but this is no worse than other existing code in guest.c.
   I plan to tidy this up independently later on.
---
 arch/arm64/kvm/guest.c | 48 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 36 insertions(+), 12 deletions(-)
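
To make the userspace-visible effect concrete, here is a minimal sketch
(hypothetical illustration, not part of the patch; it assumes an arm64
host whose <linux/kvm.h> provides KVM_REG_ARM_CORE_REG and the core
register id layout):

#include <sys/ioctl.h>
#include <linux/kvm.h>	/* on arm64, provides KVM_REG_ARM_CORE_REG */

/*
 * Read V0 through the legacy core-register view.  With this patch
 * applied, the ioctl fails with EINVAL on an SVE-enabled vcpu; the
 * KVM_REG_ARM64_SVE view added later in the series must be used
 * instead.  Vcpus without the opt-in SVE feature are unaffected.
 */
static int read_v0_core_view(int vcpu_fd, void *buf16)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U128 |
			KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
		.addr = (__u64)(unsigned long)buf16,	/* 16-byte buffer */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}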

Comments

Andrew Jones April 3, 2019, 8:15 p.m. UTC | #1
On Fri, Mar 29, 2019 at 01:00:42PM +0000, Dave Martin wrote:
> In order to avoid the pointless complexity of maintaining two ioctl
> register access views of the same data, this patch blocks ioctl
> access to the FPSIMD V-registers on vcpus that support SVE.
> 
> This will make it more straightforward to add SVE register access
> support.
> 
> Since SVE is an opt-in feature for userspace, this will not affect
> existing users.
> 
> Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> Reviewed-by: Julien Thierry <julien.thierry@arm.com>
> Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
> 
> [...]

Reviewed-by: Andrew Jones <drjones@redhat.com>
Alex Bennée April 24, 2019, 1:45 p.m. UTC | #2
Dave Martin <Dave.Martin@arm.com> writes:

> In order to avoid the pointless complexity of maintaining two ioctl
> register access views of the same data, this patch blocks ioctl
> access to the FPSIMD V-registers on vcpus that support SVE.
>
> This will make it more straightforward to add SVE register access
> support.
>
> Since SVE is an opt-in feature for userspace, this will not affect
> existing users.
>
> Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> Reviewed-by: Julien Thierry <julien.thierry@arm.com>
> Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

--
Alex Bennée

Patch

diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index a391a61..756d0d6 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -54,12 +54,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static bool core_reg_offset_is_vreg(u64 off)
+{
+	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
+}
+
 static u64 core_reg_offset_from_id(u64 id)
 {
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+				const struct kvm_one_reg *reg)
 {
 	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
@@ -91,11 +98,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) == size &&
-	    IS_ALIGNED(off, size / sizeof(__u32)))
-		return 0;
+	if (KVM_REG_SIZE(reg->id) != size ||
+	    !IS_ALIGNED(off, size / sizeof(__u32)))
+		return -EINVAL;
 
-	return -EINVAL;
+	/*
+	 * The KVM_REG_ARM64_SVE regs must be used instead of
+	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+	 * SVE-enabled vcpus:
+	 */
+	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+		return -EINVAL;
+
+	return 0;
 }
 
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -117,7 +132,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	if (validate_core_offset(vcpu, reg))
 		return -EINVAL;
 
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
@@ -142,7 +157,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	if (validate_core_offset(vcpu, reg))
 		return -EINVAL;
 
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -195,13 +210,22 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return -EINVAL;
 }
 
-static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+				 u64 __user *uindices)
 {
 	unsigned int i;
 	int n = 0;
 	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+		/*
+		 * The KVM_REG_ARM64_SVE regs must be used instead of
+		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+		 * SVE-enabled vcpus:
+		 */
+		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+			continue;
+
 		if (uindices) {
 			if (put_user(core_reg | i, uindices))
 				return -EFAULT;
@@ -214,9 +238,9 @@ static int kvm_arm_copy_core_reg_indices(u64 __user *uindices)
 	return n;
 }
 
-static unsigned long num_core_regs(void)
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
 {
-	return kvm_arm_copy_core_reg_indices(NULL);
+	return copy_core_reg_indices(vcpu, NULL);
 }
 
 /**
@@ -281,7 +305,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
 	unsigned long res = 0;
 
-	res += num_core_regs();
+	res += num_core_regs(vcpu);
 	res += kvm_arm_num_sys_reg_descs(vcpu);
 	res += kvm_arm_get_fw_num_regs(vcpu);
 	res += NUM_TIMER_REGS;
@@ -298,7 +322,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	int ret;
 
-	ret = kvm_arm_copy_core_reg_indices(uindices);
+	ret = copy_core_reg_indices(vcpu, uindices);
 	if (ret)
 		return ret;
 	uindices += ret;
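
As a closing illustration of the enumeration side: copy_core_reg_indices()
now simply skips the V-reg indices on SVE vcpus, so the count returned by
kvm_arm_num_regs() and the ids reported by KVM_GET_REG_LIST stay
consistent, and userspace that walks the list never sees the blocked
registers. A hedged sketch of the usual probe-then-fetch pattern
(illustrative only, error handling abbreviated):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Fetch the register index list for a vcpu.  On an SVE-enabled vcpu,
 * the KVM_REG_ARM_CORE ids covering fp_regs.vregs[] are absent from
 * the returned list after this patch.
 */
static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* The first call fails with E2BIG but reports the required count */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}

	return list;
}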