[4/4] KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu

Message ID: 20190523063632.GF19655@blackberry (mailing list archive)
State: New, archived
Series: KVM: PPC: Book3S: Fix potential deadlocks

Commit Message

Paul Mackerras May 23, 2019, 6:36 a.m. UTC
Currently the HV KVM code takes the kvm->lock around calls to
kvm_for_each_vcpu() and kvm_get_vcpu_by_id() (which can call
kvm_for_each_vcpu() internally).  However, that leads to a lock
order inversion problem, because these are called in contexts where
the vcpu mutex is held, but the vcpu mutexes nest within kvm->lock
according to Documentation/virtual/kvm/locking.txt.  Hence there
is a possibility of deadlock.
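
As an illustration (a minimal standalone sketch, not kernel code: the
two pthread mutexes below merely stand in for kvm->lock and a vcpu
mutex), the inversion looks like this:

	#include <pthread.h>

	/* Stand-ins for kvm->lock and one vcpu->mutex. */
	static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t vcpu_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Documented order (locking.txt): kvm->lock is taken outside
	 * the vcpu mutex. */
	static void *documented_order(void *arg)
	{
		pthread_mutex_lock(&kvm_lock);
		pthread_mutex_lock(&vcpu_mutex);
		pthread_mutex_unlock(&vcpu_mutex);
		pthread_mutex_unlock(&kvm_lock);
		return NULL;
	}

	/* The problematic HV paths: entered with the vcpu mutex
	 * already held (vcpu ioctl context), they then took kvm->lock,
	 * i.e. the inverse order.  Run concurrently with
	 * documented_order(), each thread can end up holding the lock
	 * the other needs next, and both block forever. */
	static void *inverted_order(void *arg)
	{
		pthread_mutex_lock(&vcpu_mutex);
		pthread_mutex_lock(&kvm_lock);
		pthread_mutex_unlock(&kvm_lock);
		pthread_mutex_unlock(&vcpu_mutex);
		return NULL;
	}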

To fix this, we simply don't take the kvm->lock mutex around these
calls.  This is safe because kvm_for_each_vcpu() and
kvm_get_vcpu_by_id() are designed to be callable locklessly.
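
For reference, here is a simplified paraphrase (details elided, but
the fields and barriers are the real ones) of the generic accessors
in include/linux/kvm_host.h from around this time, showing why no
lock is needed: vcpus are only ever added while a VM exists, and each
kvm->vcpus[] slot is published before online_vcpus is incremented.

	/* Paraphrased from include/linux/kvm_host.h, circa v5.2. */
	static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
	{
		int num_vcpus = atomic_read(&kvm->online_vcpus);

		/* Pairs with the smp_wmb() in vcpu creation that orders
		 * the kvm->vcpus[i] store before the online_vcpus
		 * increment, so a reader that sees the new count also
		 * sees the new pointer. */
		smp_rmb();
		if (i < num_vcpus)
			return kvm->vcpus[i];
		return NULL;
	}

	/* Iteration is just bounded array reads; slots are never
	 * cleared or reused while the VM lives, so no lock is taken. */
	#define kvm_for_each_vcpu(idx, vcpup, kvm) \
		for (idx = 0; \
		     idx < atomic_read(&kvm->online_vcpus) && \
		     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
		     idx++)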

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 arch/powerpc/kvm/book3s_hv.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

Comments

Cédric Le Goater May 23, 2019, 7:11 a.m. UTC | #1
On 5/23/19 8:36 AM, Paul Mackerras wrote:
> Currently the HV KVM code takes the kvm->lock around calls to
> kvm_for_each_vcpu() and kvm_get_vcpu_by_id() (which can call
> kvm_for_each_vcpu() internally).  However, that leads to a lock
> order inversion problem, because these are called in contexts where
> the vcpu mutex is held, but the vcpu mutexes nest within kvm->lock
> according to Documentation/virtual/kvm/locking.txt.  Hence there
> is a possibility of deadlock.
> 
> To fix this, we simply don't take the kvm->lock mutex around these
> calls.  This is safe because kvm_for_each_vcpu() and
> kvm_get_vcpu_by_id() are designed to be callable locklessly.

Yes.

Reviewed-by: Cédric Le Goater <clg@kaod.org>

Thanks,

C.

Patch

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index b1c0a9b..27054d3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -446,12 +446,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	struct kvm_vcpu *ret;
-
-	mutex_lock(&kvm->lock);
-	ret = kvm_get_vcpu_by_id(kvm, id);
-	mutex_unlock(&kvm->lock);
-	return ret;
+	return kvm_get_vcpu_by_id(kvm, id);
 }
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1583,7 +1578,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;
 
-	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the
@@ -1623,7 +1617,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
-	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,