@@ -241,6 +241,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	gpa_t addr;
 	int cpuid, ret, c;
 	struct kvm_vcpu *vcpu, *tmp_vcpu;
+	int vcpu_lock_idx = -1;
 
 	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
 		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
@@ -259,17 +260,16 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	}
 
 	/*
-	 * Ensure that no other VCPU is running by checking the vcpu->cpu
-	 * field. If no other VPCUs are running we can safely access the VGIC
-	 * state, because even if another VPU is run after this point, that
-	 * VCPU will not touch the vgic state, because it will block on
-	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
+	 * that no other VCPUs can run and fiddle with the vgic state while we
+	 * access it.
 	 */
+	ret = -EBUSY;
 	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
-		if (unlikely(tmp_vcpu->cpu != -1)) {
-			ret = -EBUSY;
+		if (!mutex_trylock(&tmp_vcpu->mutex))
 			goto out;
-		}
+		vcpu_lock_idx = c;
 	}
 
 	switch (attr->group) {
@@ -285,6 +285,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	}
 
 out:
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
+		mutex_unlock(&tmp_vcpu->mutex);
+	}
+
 	mutex_unlock(&dev->kvm->lock);
 	return ret;
 }
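
For reference, the trylock-and-rollback scheme that the hunks above open-code can be factored into a reusable helper pair. The sketch below merely restates the pattern applied by this patch; the helper names lock_all_vcpus() and unlock_vcpus() are illustrative and are not introduced by this diff:

#include <linux/kvm_host.h>

/* Release vcpu->mutex for vcpus 0..vcpu_lock_idx, in reverse order. */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}
}

/*
 * Try to take every vcpu->mutex without sleeping. If any VCPU is
 * currently running (vcpu_load holds its mutex), roll back the mutexes
 * already taken and report failure so the caller can return -EBUSY.
 */
static bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	int c;

	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}

A caller would bracket the VGIC access with these helpers: fail with -EBUSY when lock_all_vcpus() returns false, and otherwise call e.g. unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1) once the access is done.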