
[v2,1/2] KVM: arm/arm64: Factor out vgic_attr_regs_access functionality

Message ID 20160816173501.21521-2-christoffer.dall@linaro.org (mailing list archive)
State: New, archived

Commit Message

Christoffer Dall Aug. 16, 2016, 5:35 p.m. UTC
As we are about to deal with multiple data types and with situations
where the vgic should not be initialized when userspace accesses the
register attributes, factor out the functionality of
vgic_attr_regs_access into smaller pieces that can be reused by a new
function later.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
---
 virt/kvm/arm/vgic/vgic-kvm-device.c | 100 ++++++++++++++++++++++++++----------
 1 file changed, 73 insertions(+), 27 deletions(-)
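
For illustration only (not part of the patch or the thread): a minimal sketch of how a later userspace-access path could be assembled from the helpers the diff below introduces. The function name example_regs_access and the elided register access are hypothetical placeholders; the commit message's "new function" only arrives in a later patch.

/*
 * Hypothetical sketch: compose the factored-out helpers (parse the
 * attribute, take kvm->lock, lock every vcpu, do the access, unlock).
 */
static int example_regs_access(struct kvm_device *dev,
			       struct kvm_device_attr *attr,
			       u32 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	int ret;

	ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	mutex_lock(&dev->kvm->lock);

	/* lock_all_vcpus() takes every vcpu->mutex or none of them */
	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	/* ... access the register at reg_attr.addr on reg_attr.vcpu ... */

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

The all-or-nothing behaviour of lock_all_vcpus() is what keeps the error path simple: on a trylock failure it has already released every mutex it took, so a caller only has to report -EBUSY.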

Comments

Eric Auger Aug. 17, 2016, 5:46 a.m. UTC | #1
Hi Christoffer,

On 16/08/2016 19:35, Christoffer Dall wrote:
> As we are about to deal with multiple data types and with situations
> where the vgic should not be initialized when userspace accesses the
> register attributes, factor out the functionality of
> vgic_attr_regs_access into smaller pieces that can be reused by a new
> function later.
> 
> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
> ---
>  virt/kvm/arm/vgic/vgic-kvm-device.c | 100 ++++++++++++++++++++++++++----------
>  1 file changed, 73 insertions(+), 27 deletions(-)
> 
> [...]

Reviewed-by: Eric Auger <eric.auger@redhat.com>

Eric

Patch

diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 1813f93..19fa331 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -233,6 +233,67 @@ int kvm_register_vgic_device(unsigned long type)
 	return ret;
 }
 
+struct vgic_reg_attr {
+	struct kvm_vcpu *vcpu;
+	gpa_t addr;
+};
+
+static int parse_vgic_v2_attr(struct kvm_device *dev,
+			      struct kvm_device_attr *attr,
+			      struct vgic_reg_attr *reg_attr)
+{
+	int cpuid;
+
+	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+		return -EINVAL;
+
+	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+	return 0;
+}
+
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+	struct kvm_vcpu *tmp_vcpu;
+
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+		mutex_unlock(&tmp_vcpu->mutex);
+	}
+}
+
+static void unlock_all_vcpus(struct kvm *kvm)
+{
+	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+static bool lock_all_vcpus(struct kvm *kvm)
+{
+	struct kvm_vcpu *tmp_vcpu;
+	int c;
+
+	/*
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+	 * that no other VCPUs are run and fiddle with the vgic state while we
+	 * access it.
+	 */
+	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+		if (!mutex_trylock(&tmp_vcpu->mutex)) {
+			unlock_vcpus(kvm, c - 1);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 /** vgic_attr_regs_access: allows user space to read/write VGIC registers
  *
  * @dev: kvm device handle
@@ -245,15 +306,17 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 				 struct kvm_device_attr *attr,
 				 u32 *reg, bool is_write)
 {
+	struct vgic_reg_attr reg_attr;
 	gpa_t addr;
-	int cpuid, ret, c;
-	struct kvm_vcpu *vcpu, *tmp_vcpu;
-	int vcpu_lock_idx = -1;
+	struct kvm_vcpu *vcpu;
+	int ret;
 
-	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
-		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
-	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+	ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
+	if (ret)
+		return ret;
+
+	vcpu = reg_attr.vcpu;
+	addr = reg_attr.addr;
 
 	mutex_lock(&dev->kvm->lock);
 
@@ -261,24 +324,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	if (ret)
 		goto out;
 
-	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
-		ret = -EINVAL;
+	if (!lock_all_vcpus(dev->kvm)) {
+		ret = -EBUSY;
 		goto out;
 	}
 
-	/*
-	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
-	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
-	 * that no other VCPUs are run and fiddle with the vgic state while we
-	 * access it.
-	 */
-	ret = -EBUSY;
-	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
-		if (!mutex_trylock(&tmp_vcpu->mutex))
-			goto out;
-		vcpu_lock_idx = c;
-	}
-
 	switch (attr->group) {
 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
@@ -291,12 +341,8 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
+	unlock_all_vcpus(dev->kvm);
 out:
-	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
-		mutex_unlock(&tmp_vcpu->mutex);
-	}
-
 	mutex_unlock(&dev->kvm->lock);
 	return ret;
 }
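
For illustration only, a hedged userspace sketch of the attribute encoding that parse_vgic_v2_attr() decodes: the target CPU index sits in bits [39:32] of kvm_device_attr.attr (KVM_DEV_ARM_VGIC_CPUID_MASK/SHIFT) and the register offset in the low 32 bits (KVM_DEV_ARM_VGIC_OFFSET_MASK). The helper name vgic_v2_cpu_reg_get and the already-created GICv2 device fd are assumptions, not part of the patch.

/*
 * Illustrative userspace sketch: assumes an arm/arm64 <linux/kvm.h> and an
 * already-created KVM_DEV_TYPE_ARM_VGIC_V2 device fd.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vgic_v2_cpu_reg_get(int vgic_fd, uint32_t cpuid, uint32_t offset,
			       uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
		/* cpuid in bits [39:32], GICv2 CPU interface offset in bits [31:0] */
		.attr  = ((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		.addr  = (uint64_t)(unsigned long)val,
	};

	/*
	 * With this patch a bad cpuid fails with errno EINVAL before any
	 * locking, and EBUSY means some vcpu->mutex could not be taken
	 * (typically because a vcpu is currently running).
	 */
	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}

On success the kernel copies the requested register value back through .addr, so *val holds the result.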