@@ -290,9 +290,10 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
/*
- * number of register slices required to cover each whole SVE register on vcpu
- * NOTE: If you are tempted to modify this, you must also rework
- * sve_reg_to_region() to match:
+ * Number of register slices required to cover each whole SVE register.
+ * NOTE: Only the first slice ever exists, for now.
+ * If you are tempted to modify this, you must also rework sve_reg_to_region()
+ * to match:
*/
#define vcpu_sve_slices(vcpu) 1
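(Illustration, not part of the patch.) The slice index is the low five bits of the register ID and the register number the five bits above it, per the SVE_REG_SLICE_MASK/SVE_REG_ID_MASK definitions earlier in guest.c. A minimal decoding sketch, with hypothetical helper names:

	/* Hypothetical helpers, for illustration only */
	static unsigned int sve_reg_slice(u64 id)
	{
		/* slice index: register ID bits [4:0] */
		return (id & SVE_REG_SLICE_MASK) >> SVE_REG_SLICE_SHIFT;
	}

	static unsigned int sve_reg_num(u64 id)
	{
		/* register number: register ID bits [9:5] */
		return (id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
	}

So KVM_REG_ARM64_SVE_ZREG(3, 0) gives sve_reg_num() == 3 and sve_reg_slice() == 0, and any ID with a nonzero slice field is rejected by the check below.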
@@ -334,8 +335,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
/* Verify that we match the UAPI header: */
BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
- /* Only the first slice ever exists, for now: */
if ((reg->id & SVE_REG_SLICE_MASK) != 0)
return -ENOENT;
vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
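(Worked example, not part of the patch.) One vector quadword (VQ) is 128 bits, so sve_vq_from_vl() divides the byte vector length by 16: a vCPU limited to 512-bit vectors has sve_max_vl == 64 and hence vq == 4. Each slice is KVM_REG_SIZE_U2048 (256 bytes) and the architectural maximum vector length is 2048 bits, so slice 0 always covers the whole register, which is what lets vcpu_sve_slices() stay hard-coded to 1:

	/* Illustration only, assuming sve_max_vl == 64 (512-bit vectors) */
	unsigned int vq = sve_vq_from_vl(64);	/* 64 / 16 == 4 */
	size_t live_bytes = vq * 16;		/* 64 bytes of Z-register data */
	/* KVM_SVE_ZREG_SIZE is 256 bytes, so one slice is always enough */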
@@ -520,7 +520,6 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
- /* Only the first slice ever exists, for now */
const unsigned int slices = vcpu_sve_slices(vcpu);
if (!vcpu_has_sve(vcpu))
@@ -536,7 +535,6 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
- /* Only the first slice ever exists, for now */
const unsigned int slices = vcpu_sve_slices(vcpu);
u64 reg;
unsigned int i, n;
@@ -555,7 +553,6 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
reg = KVM_REG_ARM64_SVE_VLS;
if (put_user(reg, uindices++))
return -EFAULT;
-
++num_regs;
for (i = 0; i < slices; i++) {
@@ -563,7 +560,6 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
reg = KVM_REG_ARM64_SVE_ZREG(n, i);
if (put_user(reg, uindices++))
return -EFAULT;
-
num_regs++;
}
@@ -571,14 +567,12 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
reg = KVM_REG_ARM64_SVE_PREG(n, i);
if (put_user(reg, uindices++))
return -EFAULT;
-
num_regs++;
}
reg = KVM_REG_ARM64_SVE_FFR(i);
if (put_user(reg, uindices++))
return -EFAULT;
-
num_regs++;
}
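(Userspace view, illustration only.) The indices enumerated above surface through KVM_GET_REG_LIST. A hedged sketch that lists the SVE registers of a vCPU, assuming vcpu_fd is an open vCPU fd whose SVE feature has been enabled and finalized; error handling trimmed:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void list_sve_regs(int vcpu_fd)
	{
		struct kvm_reg_list probe = { .n = 0 };
		struct kvm_reg_list *list;
		__u64 i;

		/* First call fails with E2BIG but reports the count in probe.n */
		ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

		list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
		list->n = probe.n;
		ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

		for (i = 0; i < list->n; i++)
			if ((list->reg[i] & KVM_REG_ARM_COPROC_MASK) ==
			    KVM_REG_ARM64_SVE)
				printf("SVE reg 0x%llx\n",
				       (unsigned long long)list->reg[i]);

		free(list);
	}

KVM_REG_ARM64_SVE_VLS is emitted first so that userspace restoring in list order sets the vector lengths before any vector register payload.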
@@ -645,7 +639,6 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
- default: break; /* fall through */
}
if (is_timer_reg(reg->id))
@@ -664,7 +657,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
- default: break; /* fall through */
}
if (is_timer_reg(reg->id))
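(Illustration only.) With the default case gone, IDs in the three known coprocessor groups return directly from the switch and everything else falls out to the timer and sys-reg fallbacks. From userspace, the SVE case is reached like this, assuming vcpu_fd as above and a buffer of at least 256 bytes:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int read_z0(int vcpu_fd, void *buf)
	{
		struct kvm_one_reg r = {
			/* Z0, slice 0: a U2048 register, 256 bytes of data */
			.id   = KVM_REG_ARM64_SVE_ZREG(0, 0),
			.addr = (__u64)(unsigned long)buf,
		};

		/* Dispatched via kvm_arm_get_reg() to get_sve_reg() above */
		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
	}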