diff mbox series

[v2,07/14] KVM: arm64/sve: Make register ioctl access errors more consistent

Message ID 1555603631-8107-8-git-send-email-Dave.Martin@arm.com (mailing list archive)
State Mainlined, archived
Commit 52110aa95948deba724739fc9933070c3e2c7239
Headers show
Series KVM: arm64: SVE cleanups | expand

Commit Message

Dave Martin April 18, 2019, 4:07 p.m. UTC
Currently, the way error codes are generated when processing the
SVE register access ioctls is a bit haphazard.

This patch refactors the code so that the behaviour is more
consistent: now, -EINVAL should be returned only for unrecognised
register IDs or when some other runtime error occurs.  -ENOENT is
returned for register IDs that are recognised, but whose
corresponding register (or slice) does not exist for the vcpu.

To this end, in {get,set}_sve_reg() we now delegate the
vcpu_has_sve() check down into {get,set}_sve_vls() and
sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
picked off first, then sve_reg_to_region() plays the role of
exhaustively validating or rejecting the register ID and (where
accepted) computing the applicable register region as before.

sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
returned prematurely, before checking whether reg->id is in a
recognised range.

-EPERM is now only returned when an attempt is made to access an
actually existing register slice on an unfinalized vcpu.

Fixes: e1c9c98345b3 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
Fixes: 9033bba4b535 ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
Suggested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
---
 arch/arm64/kvm/guest.c | 52 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 21 deletions(-)

Comments

Alex Bennée April 25, 2019, 12:30 p.m. UTC | #1
Dave Martin <Dave.Martin@arm.com> writes:

> Currently, the way error codes are generated when processing the
> SVE register access ioctls in a bit haphazard.
>
> This patch refactors the code so that the behaviour is more
> consistent: now, -EINVAL should be returned only for unrecognised
> register IDs or when some other runtime error occurs.  -ENOENT is
> returned for register IDs that are recognised, but whose
> corresponding register (or slice) does not exist for the vcpu.
>
> To this end, in {get,set}_sve_reg() we now delegate the
> vcpu_has_sve() check down into {get,set}_sve_vls() and
> sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
> picked off first, then sve_reg_to_region() plays the role of
> exhaustively validating or rejecting the register ID and (where
> accepted) computing the applicable register region as before.
>
> sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
> returned prematurely, before checking whether reg->id is in a
> recognised range.
>
> -EPERM is now only returned when an attempt is made to access an
> actually existing register slice on an unfinalized vcpu.
>
> Fixes: e1c9c98345b3 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
> Fixes: 9033bba4b535 ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
> Suggested-by: Andrew Jones <drjones@redhat.com>
> Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> Reviewed-by: Andrew Jones <drjones@redhat.com>
> ---
>  arch/arm64/kvm/guest.c | 52 ++++++++++++++++++++++++++++++--------------------
>  1 file changed, 31 insertions(+), 21 deletions(-)
>
> diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> index f5ff7ae..e45a042 100644
> --- a/arch/arm64/kvm/guest.c
> +++ b/arch/arm64/kvm/guest.c
> @@ -221,6 +221,9 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
>  	unsigned int max_vq, vq;
>  	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
>
> +	if (!vcpu_has_sve(vcpu))
> +		return -ENOENT;
> +
>  	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
>  		return -EINVAL;
>
> @@ -242,6 +245,9 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
>  	unsigned int max_vq, vq;
>  	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
>
> +	if (!vcpu_has_sve(vcpu))
> +		return -ENOENT;
> +
>  	if (kvm_arm_vcpu_sve_finalized(vcpu))
>  		return -EPERM; /* too late! */
>
> @@ -304,7 +310,10 @@ struct sve_state_reg_region {
>  	unsigned int upad;	/* extra trailing padding in user memory */
>  };
>
> -/* Get sanitised bounds for user/kernel SVE register copy */
> +/*
> + * Validate SVE register ID and get sanitised bounds for user/kernel SVE
> + * register copy
> + */
>  static int sve_reg_to_region(struct sve_state_reg_region *region,
>  			     struct kvm_vcpu *vcpu,
>  			     const struct kvm_one_reg *reg)
> @@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
>  	/* Verify that we match the UAPI header: */
>  	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
>
> -	if ((reg->id & SVE_REG_SLICE_MASK) > 0)
> -		return -ENOENT;
> -
> -	vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> -
>  	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
>
>  	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
> +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> +			return -ENOENT;
> +
> +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> +
>  		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
>  				SVE_SIG_REGS_OFFSET;
>  		reqlen = KVM_SVE_ZREG_SIZE;
>  		maxlen = SVE_SIG_ZREG_SIZE(vq);
>  	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
> +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> +			return -ENOENT;
> +
> +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> +

I suppose you could argue for a:

	if (reg->id >= zreg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

                if (reg->id <= zreg_id_max) {
			reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
			reqlen = KVM_SVE_ZREG_SIZE;
			maxlen = SVE_SIG_ZREG_SIZE(vq);
                } else {
			reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
			reqlen = KVM_SVE_PREG_SIZE;
			maxlen = SVE_SIG_PREG_SIZE(vq);
		}
	} else {
		return -EINVAL;
	}

but only for minimal DRY reasons.

>  		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
>  				SVE_SIG_REGS_OFFSET;
>  		reqlen = KVM_SVE_PREG_SIZE;
>  		maxlen = SVE_SIG_PREG_SIZE(vq);
>  	} else {
> -		return -ENOENT;
> +		return -EINVAL;
>  	}
>
>  	sve_state_size = vcpu_sve_state_size(vcpu);
> @@ -369,24 +383,22 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
>
>  static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
>  {
> +	int ret;
>  	struct sve_state_reg_region region;
>  	char __user *uptr = (char __user *)reg->addr;
>
> -	if (!vcpu_has_sve(vcpu))
> -		return -ENOENT;
> -
>  	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
>  	if (reg->id == KVM_REG_ARM64_SVE_VLS)
>  		return get_sve_vls(vcpu, reg);
>
> -	/* Otherwise, reg is an architectural SVE register... */
> +	/* Try to interpret reg ID as an architectural SVE register... */
> +	ret = sve_reg_to_region(&region, vcpu, reg);
> +	if (ret)
> +		return ret;
>
>  	if (!kvm_arm_vcpu_sve_finalized(vcpu))
>  		return -EPERM;
>
> -	if (sve_reg_to_region(&region, vcpu, reg))
> -		return -ENOENT;
> -
>  	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
>  			 region.klen) ||
>  	    clear_user(uptr + region.klen, region.upad))
> @@ -397,24 +409,22 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
>
>  static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
>  {
> +	int ret;
>  	struct sve_state_reg_region region;
>  	const char __user *uptr = (const char __user *)reg->addr;
>
> -	if (!vcpu_has_sve(vcpu))
> -		return -ENOENT;
> -
>  	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
>  	if (reg->id == KVM_REG_ARM64_SVE_VLS)
>  		return set_sve_vls(vcpu, reg);
>
> -	/* Otherwise, reg is an architectural SVE register... */
> +	/* Try to interpret reg ID as an architectural SVE register... */
> +	ret = sve_reg_to_region(&region, vcpu, reg);
> +	if (ret)
> +		return ret;
>
>  	if (!kvm_arm_vcpu_sve_finalized(vcpu))
>  		return -EPERM;
>
> -	if (sve_reg_to_region(&region, vcpu, reg))
> -		return -ENOENT;
> -
>  	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
>  			   region.klen))
>  		return -EFAULT;


--
Alex Bennée
Dave Martin April 25, 2019, 1:04 p.m. UTC | #2
On Thu, Apr 25, 2019 at 01:30:29PM +0100, Alex Bennée wrote:
> 
> Dave Martin <Dave.Martin@arm.com> writes:
> 
> > Currently, the way error codes are generated when processing the
> > SVE register access ioctls in a bit haphazard.
> >
> > This patch refactors the code so that the behaviour is more
> > consistent: now, -EINVAL should be returned only for unrecognised
> > register IDs or when some other runtime error occurs.  -ENOENT is
> > returned for register IDs that are recognised, but whose
> > corresponding register (or slice) does not exist for the vcpu.
> >
> > To this end, in {get,set}_sve_reg() we now delegate the
> > vcpu_has_sve() check down into {get,set}_sve_vls() and
> > sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
> > picked off first, then sve_reg_to_region() plays the role of
> > exhaustively validating or rejecting the register ID and (where
> > accepted) computing the applicable register region as before.
> >
> > sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
> > returned prematurely, before checking whether reg->id is in a
> > recognised range.
> >
> > -EPERM is now only returned when an attempt is made to access an
> > actually existing register slice on an unfinalized vcpu.
> >
> > Fixes: e1c9c98345b3 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
> > Fixes: 9033bba4b535 ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
> > Suggested-by: Andrew Jones <drjones@redhat.com>
> > Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> > Reviewed-by: Andrew Jones <drjones@redhat.com>

[...]

> > @@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
> >  	/* Verify that we match the UAPI header: */
> >  	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
> >
> > -	if ((reg->id & SVE_REG_SLICE_MASK) > 0)
> > -		return -ENOENT;
> > -
> > -	vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> > -
> >  	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
> >
> >  	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
> > +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> > +			return -ENOENT;
> > +
> > +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> > +
> >  		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
> >  				SVE_SIG_REGS_OFFSET;
> >  		reqlen = KVM_SVE_ZREG_SIZE;
> >  		maxlen = SVE_SIG_ZREG_SIZE(vq);
> >  	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
> > +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> > +			return -ENOENT;
> > +
> > +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> > +
> 
> I suppose you could argue for a:
> 
> 	if (reg->id >= zreg_id_min && reg->id <= preg_id_max) {
> 		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> 			return -ENOENT;
> 
> 		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> 
>                 if (reg->id <= zreg_id_max) {
> 			reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
> 				SVE_SIG_REGS_OFFSET;
> 			reqlen = KVM_SVE_ZREG_SIZE;
> 			maxlen = SVE_SIG_ZREG_SIZE(vq);
>                 } else {
> 			reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
> 				SVE_SIG_REGS_OFFSET;
> 			reqlen = KVM_SVE_PREG_SIZE;
> 			maxlen = SVE_SIG_PREG_SIZE(vq);
> 		}
> 	} else {
> 		return -EINVAL;
> 	}
> 
> but only for minimal DRY reasons.

Agreed, but that bakes in another assumption: that the ZREG and PREG ID
ranges are contiguous.

I preferred to keep the number of assumptions down.

Although the resulting code wasn't ideal, the actual amount of
duplication that I ended up with here seemed low enough as to be
acceptable (though opinions can differ on that).

[...]

Cheers
---Dave
Alex Bennée April 25, 2019, 3:04 p.m. UTC | #3
Dave Martin <Dave.Martin@arm.com> writes:

> On Thu, Apr 25, 2019 at 01:30:29PM +0100, Alex Bennée wrote:
>>
>> Dave Martin <Dave.Martin@arm.com> writes:
>>
>> > Currently, the way error codes are generated when processing the
>> > SVE register access ioctls in a bit haphazard.
>> >
>> > This patch refactors the code so that the behaviour is more
>> > consistent: now, -EINVAL should be returned only for unrecognised
>> > register IDs or when some other runtime error occurs.  -ENOENT is
>> > returned for register IDs that are recognised, but whose
>> > corresponding register (or slice) does not exist for the vcpu.
>> >
>> > To this end, in {get,set}_sve_reg() we now delegate the
>> > vcpu_has_sve() check down into {get,set}_sve_vls() and
>> > sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
>> > picked off first, then sve_reg_to_region() plays the role of
>> > exhaustively validating or rejecting the register ID and (where
>> > accepted) computing the applicable register region as before.
>> >
>> > sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
>> > returned prematurely, before checking whether reg->id is in a
>> > recognised range.
>> >
>> > -EPERM is now only returned when an attempt is made to access an
>> > actually existing register slice on an unfinalized vcpu.
>> >
>> > Fixes: e1c9c98345b3 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
>> > Fixes: 9033bba4b535 ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
>> > Suggested-by: Andrew Jones <drjones@redhat.com>
>> > Signed-off-by: Dave Martin <Dave.Martin@arm.com>
>> > Reviewed-by: Andrew Jones <drjones@redhat.com>
>
> [...]
>
>> > @@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
>> >  	/* Verify that we match the UAPI header: */
>> >  	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
>> >
>> > -	if ((reg->id & SVE_REG_SLICE_MASK) > 0)
>> > -		return -ENOENT;
>> > -
>> > -	vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
>> > -
>> >  	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
>> >
>> >  	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
>> > +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
>> > +			return -ENOENT;
>> > +
>> > +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
>> > +
>> >  		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
>> >  				SVE_SIG_REGS_OFFSET;
>> >  		reqlen = KVM_SVE_ZREG_SIZE;
>> >  		maxlen = SVE_SIG_ZREG_SIZE(vq);
>> >  	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
>> > +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
>> > +			return -ENOENT;
>> > +
>> > +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
>> > +
>>
>> I suppose you could argue for a:
>>
>> 	if (reg->id >= zreg_id_min && reg->id <= preg_id_max) {
>> 		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
>> 			return -ENOENT;
>>
>> 		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
>>
>>                 if (reg->id <= zreg_id_max) {
>> 			reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
>> 				SVE_SIG_REGS_OFFSET;
>> 			reqlen = KVM_SVE_ZREG_SIZE;
>> 			maxlen = SVE_SIG_ZREG_SIZE(vq);
>>                 } else {
>> 			reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
>> 				SVE_SIG_REGS_OFFSET;
>> 			reqlen = KVM_SVE_PREG_SIZE;
>> 			maxlen = SVE_SIG_PREG_SIZE(vq);
>> 		}
>> 	} else {
>> 		return -EINVAL;
>> 	}
>>
>> but only for minimal DRY reasons.
>
> Agreed, but that bakes in another assumption: that the ZREG and PREG ID
> ranges are contiguous.

Ahh I'd misread:

  /* reg ID ranges for P- registers and FFR (which are contiguous) */

However these are defined in the UABI:

  /* Z- and P-regs occupy blocks at the following offsets within this range: */
  #define KVM_REG_ARM64_SVE_ZREG_BASE	0
  #define KVM_REG_ARM64_SVE_PREG_BASE	0x400
  #define KVM_REG_ARM64_SVE_FFR_BASE	0x600

so their position is pretty fixed now, right?

> I preferred to keep the number of assumptions down.
>
> Althoug the resulting code wasn't ideal, the actual amount of
> duplication that I ended up with here seemed low enough as to be
> acceptable (though opinions can differ on that).

It's no biggie ;-)

--
Alex Bennée
Dave Martin April 25, 2019, 3:27 p.m. UTC | #4
On Thu, Apr 25, 2019 at 04:04:36PM +0100, Alex Bennée wrote:
> 
> Dave Martin <Dave.Martin@arm.com> writes:
> 
> > On Thu, Apr 25, 2019 at 01:30:29PM +0100, Alex Bennée wrote:
> >>
> >> Dave Martin <Dave.Martin@arm.com> writes:
> >>
> >> > Currently, the way error codes are generated when processing the
> >> > SVE register access ioctls in a bit haphazard.
> >> >
> >> > This patch refactors the code so that the behaviour is more
> >> > consistent: now, -EINVAL should be returned only for unrecognised
> >> > register IDs or when some other runtime error occurs.  -ENOENT is
> >> > returned for register IDs that are recognised, but whose
> >> > corresponding register (or slice) does not exist for the vcpu.
> >> >
> >> > To this end, in {get,set}_sve_reg() we now delegate the
> >> > vcpu_has_sve() check down into {get,set}_sve_vls() and
> >> > sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
> >> > picked off first, then sve_reg_to_region() plays the role of
> >> > exhaustively validating or rejecting the register ID and (where
> >> > accepted) computing the applicable register region as before.
> >> >
> >> > sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
> >> > returned prematurely, before checking whether reg->id is in a
> >> > recognised range.
> >> >
> >> > -EPERM is now only returned when an attempt is made to access an
> >> > actually existing register slice on an unfinalized vcpu.
> >> >
> >> > Fixes: e1c9c98345b3 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
> >> > Fixes: 9033bba4b535 ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
> >> > Suggested-by: Andrew Jones <drjones@redhat.com>
> >> > Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> >> > Reviewed-by: Andrew Jones <drjones@redhat.com>
> >
> > [...]
> >
> >> > @@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
> >> >  	/* Verify that we match the UAPI header: */
> >> >  	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
> >> >
> >> > -	if ((reg->id & SVE_REG_SLICE_MASK) > 0)
> >> > -		return -ENOENT;
> >> > -
> >> > -	vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> >> > -
> >> >  	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
> >> >
> >> >  	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
> >> > +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> >> > +			return -ENOENT;
> >> > +
> >> > +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> >> > +
> >> >  		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
> >> >  				SVE_SIG_REGS_OFFSET;
> >> >  		reqlen = KVM_SVE_ZREG_SIZE;
> >> >  		maxlen = SVE_SIG_ZREG_SIZE(vq);
> >> >  	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
> >> > +		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> >> > +			return -ENOENT;
> >> > +
> >> > +		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> >> > +
> >>
> >> I suppose you could argue for a:
> >>
> >> 	if (reg->id >= zreg_id_min && reg->id <= preg_id_max) {
> >> 		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
> >> 			return -ENOENT;
> >>
> >> 		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
> >>
> >>                 if (reg->id <= zreg_id_max) {
> >> 			reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
> >> 				SVE_SIG_REGS_OFFSET;
> >> 			reqlen = KVM_SVE_ZREG_SIZE;
> >> 			maxlen = SVE_SIG_ZREG_SIZE(vq);
> >>                 } else {
> >> 			reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
> >> 				SVE_SIG_REGS_OFFSET;
> >> 			reqlen = KVM_SVE_PREG_SIZE;
> >> 			maxlen = SVE_SIG_PREG_SIZE(vq);
> >> 		}
> >> 	} else {
> >> 		return -EINVAL;
> >> 	}
> >>
> >> but only for minimal DRY reasons.
> >
> > Agreed, but that bakes in another assumption: that the ZREG and PREG ID
> > ranges are contiguous.
> 
> Ahh I'd misread:
> 
>   /* reg ID ranges for P- registers and FFR (which are contiguous) */
> 
> However these are defined in the UABI:
> 
>   /* Z- and P-regs occupy blocks at the following offsets within this range: */
>   #define KVM_REG_ARM64_SVE_ZREG_BASE	0
>   #define KVM_REG_ARM64_SVE_PREG_BASE	0x400
>   #define KVM_REG_ARM64_SVE_FFR_BASE	0x600
> 
> so there position is pretty fixed now right?

Yes.  I couldn't really decide how much we consider this to be
a happy accident.

In hindsight, simply making FFR == "P16" official may have been the
simpler way to go, but it should work whether code explicitly assumes
this or not.  As of today, I prefer not to encourage it too much,
but this is more for philosophical than practical reasons.

[...]

Cheers
---Dave
diff mbox series

Patch

diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index f5ff7ae..e45a042 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -221,6 +221,9 @@  static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	unsigned int max_vq, vq;
 	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
 
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
 	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
 		return -EINVAL;
 
@@ -242,6 +245,9 @@  static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	unsigned int max_vq, vq;
 	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
 
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
 	if (kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM; /* too late! */
 
@@ -304,7 +310,10 @@  struct sve_state_reg_region {
 	unsigned int upad;	/* extra trailing padding in user memory */
 };
 
-/* Get sanitised bounds for user/kernel SVE register copy */
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
 static int sve_reg_to_region(struct sve_state_reg_region *region,
 			     struct kvm_vcpu *vcpu,
 			     const struct kvm_one_reg *reg)
@@ -335,25 +344,30 @@  static int sve_reg_to_region(struct sve_state_reg_region *region,
 	/* Verify that we match the UAPI header: */
 	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
 
-	if ((reg->id & SVE_REG_SLICE_MASK) > 0)
-		return -ENOENT;
-
-	vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
-
 	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
 
 	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+			return -ENOENT;
+
+		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
 		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
 				SVE_SIG_REGS_OFFSET;
 		reqlen = KVM_SVE_ZREG_SIZE;
 		maxlen = SVE_SIG_ZREG_SIZE(vq);
 	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+			return -ENOENT;
+
+		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
 		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
 				SVE_SIG_REGS_OFFSET;
 		reqlen = KVM_SVE_PREG_SIZE;
 		maxlen = SVE_SIG_PREG_SIZE(vq);
 	} else {
-		return -ENOENT;
+		return -EINVAL;
 	}
 
 	sve_state_size = vcpu_sve_state_size(vcpu);
@@ -369,24 +383,22 @@  static int sve_reg_to_region(struct sve_state_reg_region *region,
 
 static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
+	int ret;
 	struct sve_state_reg_region region;
 	char __user *uptr = (char __user *)reg->addr;
 
-	if (!vcpu_has_sve(vcpu))
-		return -ENOENT;
-
 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
 		return get_sve_vls(vcpu, reg);
 
-	/* Otherwise, reg is an architectural SVE register... */
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
 
 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM;
 
-	if (sve_reg_to_region(&region, vcpu, reg))
-		return -ENOENT;
-
 	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
 			 region.klen) ||
 	    clear_user(uptr + region.klen, region.upad))
@@ -397,24 +409,22 @@  static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
+	int ret;
 	struct sve_state_reg_region region;
 	const char __user *uptr = (const char __user *)reg->addr;
 
-	if (!vcpu_has_sve(vcpu))
-		return -ENOENT;
-
 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
 		return set_sve_vls(vcpu, reg);
 
-	/* Otherwise, reg is an architectural SVE register... */
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
 
 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM;
 
-	if (sve_reg_to_region(&region, vcpu, reg))
-		return -ENOENT;
-
 	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
 			   region.klen))
 		return -EFAULT;