
KVM: x86/xen: Take srcu lock when accessing kvm_memslots()

Message ID: 1619161883-5963-1-git-send-email-wanpengli@tencent.com
State: New, archived
Series: KVM: x86/xen: Take srcu lock when accessing kvm_memslots()

Commit Message

Wanpeng Li April 23, 2021, 7:11 a.m. UTC
From: Wanpeng Li <wanpengli@tencent.com>

kvm_memslots() will be called by kvm_write_guest_offset_cached(), and
dereferencing the memslots requires the srcu read lock to be held, so
take it around the runstate writes in kvm_xen_update_runstate_guest().

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
 arch/x86/kvm/xen.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)
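
For context, the SRCU read-side pattern the patch applies looks like this
(a minimal sketch using the kernel's SRCU API; anything that dereferences
kvm_memslots() must sit inside the read-side section):

	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* enter the read-side section */
	/* ... kvm_memslots(kvm) and the slots it returns are safe here ... */
	srcu_read_unlock(&kvm->srcu, idx);	/* exit; pass back the same idx */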

Comments

Paolo Bonzini April 23, 2021, 8:13 a.m. UTC | #1
On 23/04/21 09:11, Wanpeng Li wrote:
> From: Wanpeng Li <wanpengli@tencent.com>
> 
> kvm_memslots() will be called by kvm_write_guest_offset_cached() so
> take the srcu lock.
> 
> Signed-off-by: Wanpeng Li <wanpengli@tencent.com>

Good catch.  But I would pull it from kvm_steal_time_set_preempted to 
kvm_arch_vcpu_put instead.

Paolo
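
A sketch of the move Paolo suggests (a hypothetical shape, not the patch
as merged; the rest of kvm_arch_vcpu_put() is elided):

	void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
	{
		int idx;

		/* ... existing vcpu_put work ... */

		/*
		 * Take the srcu read lock here in the caller rather than
		 * inside kvm_steal_time_set_preempted() itself.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		kvm_steal_time_set_preempted(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		/* ... */
	}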

Wanpeng Li April 23, 2021, 8:15 a.m. UTC | #2
On Fri, 23 Apr 2021 at 16:13, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 23/04/21 09:11, Wanpeng Li wrote:
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > kvm_memslots() will be called by kvm_write_guest_offset_cached() so
> > take the srcu lock.
> >
> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
>
> Good catch.  But I would pull it from kvm_steal_time_set_preempted to
> kvm_arch_vcpu_put instead.

Will do. :)

    Wanpeng

Patch

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index ae17250..d0df782 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -96,6 +96,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	struct kvm_vcpu_xen *vx = &v->arch.xen;
 	uint64_t state_entry_time;
 	unsigned int offset;
+	int idx;
 
 	kvm_xen_update_runstate(v, state);
 
@@ -133,10 +134,16 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
 		     sizeof(state_entry_time));
 
+	/*
+	 * Take the srcu lock as memslots will be accessed to check the gfn
+	 * cache generation against the memslots generation.
+	 */
+	idx = srcu_read_lock(&v->kvm->srcu);
+
 	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
 					  &state_entry_time, offset,
 					  sizeof(state_entry_time)))
-		return;
+		goto out;
 	smp_wmb();
 
 	/*
@@ -154,7 +161,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 					  &vx->current_runstate,
 					  offsetof(struct vcpu_runstate_info, state),
 					  sizeof(vx->current_runstate)))
-		return;
+		goto out;
 
 	/*
 	 * Write the actual runstate times immediately after the
@@ -173,7 +180,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 					  &vx->runstate_times[0],
 					  offset + sizeof(u64),
 					  sizeof(vx->runstate_times)))
-		return;
+		goto out;
 
 	smp_wmb();
 
@@ -186,7 +193,10 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
 					  &state_entry_time, offset,
 					  sizeof(state_entry_time)))
-		return;
+		goto out;
+
+out:
+	srcu_read_unlock(&v->kvm->srcu, idx);
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
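
For reference, the generation check that the patch's new comment refers to
looks roughly like this (a paraphrased sketch of the start of
kvm_write_guest_offset_cached() in virt/kvm/kvm_main.c, not a verbatim
copy):

	struct kvm_memslots *slots = kvm_memslots(kvm);	/* needs kvm->srcu held */

	if (slots->generation != ghc->generation) {
		/*
		 * The memslots changed after this gfn_to_hva_cache was
		 * initialized: re-resolve the cached gpa -> hva mapping.
		 */
		if (kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len))
			return -EFAULT;
	}

kvm_memslots() is an srcu-protected dereference, so calling it without the
srcu read lock held can trigger a suspicious-RCU-usage splat under lockdep,
which is what this patch fixes for the Xen runstate path.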