diff mbox series

[09/27] drm/i915/gvt: Protect gfn hash table with dedicated mutex

Message ID 20221223005739.1295925-10-seanjc@google.com (mailing list archive)
State New, archived
Headers show
Series drm/i915/gvt: KVM: KVMGT fixes and page-track cleanups | expand

Commit Message

Sean Christopherson Dec. 23, 2022, 12:57 a.m. UTC
Add and use a new mutex, gfn_lock, to protect accesses to the hash table
used to track which gfns are write-protected when shadowing the guest's
GTT.  This fixes a bug where kvmgt_page_track_write(), which doesn't hold
kvm->mmu_lock, could race with intel_gvt_page_track_remove() and trigger
a use-after-free.

Fixing kvmgt_page_track_write() by taking kvm->mmu_lock is not an option
as mmu_lock is a r/w spinlock, and intel_vgpu_page_track_handler() might
sleep when acquiring vgpu->cache_lock deep down the callstack:

  intel_vgpu_page_track_handler()
  |
  |->  page_track->handler / ppgtt_write_protection_handler()
       |
       |-> ppgtt_handle_guest_write_page_table_bytes()
           |
           |->  ppgtt_handle_guest_write_page_table()
                |
                |-> ppgtt_handle_guest_entry_removal()
                    |
                    |-> ppgtt_invalidate_pte()
                        |
                        |-> intel_gvt_dma_unmap_guest_page()
                            |
                            |-> mutex_lock(&vgpu->cache_lock);

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 drivers/gpu/drm/i915/gvt/gvt.h   |  1 +
 drivers/gpu/drm/i915/gvt/kvmgt.c | 65 ++++++++++++++++++++------------
 drivers/gpu/drm/i915/gvt/vgpu.c  |  1 +
 3 files changed, 43 insertions(+), 24 deletions(-)

Comments

Yan Zhao Dec. 28, 2022, 5:03 a.m. UTC | #1
On Fri, Dec 23, 2022 at 12:57:21AM +0000, Sean Christopherson wrote:
> Add and use a new mutex, gfn_lock, to protect accesses to the hash table
> used to track which gfns are write-protected when shadowing the guest's
> GTT.  This fixes a bug where kvmgt_page_track_write(), which doesn't hold
> kvm->mmu_lock, could race with intel_gvt_page_track_remove() and trigger
> a use-after-free.
> 
> Fixing kvmgt_page_track_write() by taking kvm->mmu_lock is not an option
> as mmu_lock is a r/w spinlock, and intel_vgpu_page_track_handler() might
> sleep when acquiring vgpu->cache_lock deep down the callstack:
> 
>   intel_vgpu_page_track_handler()
>   |
>   |->  page_track->handler / ppgtt_write_protection_handler()
>        |
>        |-> ppgtt_handle_guest_write_page_table_bytes()
>            |
>            |->  ppgtt_handle_guest_write_page_table()
>                 |
>                 |-> ppgtt_handle_guest_entry_removal()
>                     |
>                     |-> ppgtt_invalidate_pte()
>                         |
>                         |-> intel_gvt_dma_unmap_guest_page()
>                             |
>                             |-> mutex_lock(&vgpu->cache_lock);
> 
This gfn_lock could lead to deadlock in below sequence.

(1) kvm_write_track_add_gfn() to GFN 1
(2) kvmgt_page_track_write() for GFN 1
kvmgt_page_track_write()
|
|->mutex_lock(&info->vgpu_lock)
|->intel_vgpu_page_track_handler (as is kvmgt_gfn_is_write_protected)
   |
   |->page_track->handler() (ppgtt_write_protection_handler())
      |	
      |->ppgtt_handle_guest_write_page_table_bytes()
         |
         |->ppgtt_handle_guest_write_page_table()
	    |
	    |->ppgtt_handle_guest_entry_add() --> new_present
	       |
	       |->ppgtt_populate_spt_by_guest_entry()
	          |
		  |->intel_vgpu_enable_page_track() --> for GFN 2
		     |
		     |->intel_gvt_page_track_add()
		        |
			|->mutex_lock(&info->gfn_lock) ===>deadlock


Below fix based on this patch is to reuse vgpu_lock to protect the hash table
info->ptable.
Please check if it's good.


diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index b924ed079ad4..526bd973e784 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -364,7 +364,7 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
 {
        struct kvmgt_pgfn *p, *res = NULL;

-       lockdep_assert_held(&info->gfn_lock);
+       lockdep_assert_held(&info->vgpu_lock);

        hash_for_each_possible(info->ptable, p, hnode, gfn) {
                if (gfn == p->gfn) {
@@ -388,7 +388,7 @@ static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
 {
        struct kvmgt_pgfn *p;

-       lockdep_assert_held(&info->gfn_lock);
+       lockdep_assert_held(&info->vgpu_lock);

        if (kvmgt_gfn_is_write_protected(info, gfn))
                return;
@@ -1572,7 +1572,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
        if (!info->attached)
                return -ESRCH;

-       mutex_lock(&info->gfn_lock);
+       lockdep_assert_held(&info->vgpu_lock);

        if (kvmgt_gfn_is_write_protected(info, gfn))
                goto out;
@@ -1581,7 +1581,6 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
        if (!ret)
                kvmgt_protect_table_add(info, gfn);
 out:
-       mutex_unlock(&info->gfn_lock);
        return ret;
 }

@@ -1592,7 +1591,7 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
        if (!info->attached)
                return 0;

-       mutex_lock(&info->gfn_lock);
+       lockdep_assert_held(&info->vgpu_lock);

        if (!kvmgt_gfn_is_write_protected(info, gfn))
                goto out;
@@ -1601,7 +1600,6 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
        if (!ret)
                kvmgt_protect_table_del(info, gfn);
 out:
-       mutex_unlock(&info->gfn_lock);
        return ret;
 }

@@ -1612,13 +1610,15 @@ static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
                container_of(node, struct intel_vgpu, track_node);

        mutex_lock(&info->vgpu_lock);
-       mutex_lock(&info->gfn_lock);

        if (kvmgt_gfn_is_write_protected(info, gpa >> PAGE_SHIFT))
                intel_vgpu_page_track_handler(info, gpa,
                                                     (void *)val, len);

-       mutex_unlock(&info->gfn_lock);
        mutex_unlock(&info->vgpu_lock);
 }
@@ -1629,12 +1629,11 @@ static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
        struct intel_vgpu *info =
                container_of(node, struct intel_vgpu, track_node);
 
-       mutex_lock(&info->gfn_lock);
+       lockdep_assert_held(&info->vgpu_lock);
        for (i = 0; i < nr_pages; i++) {
                if (kvmgt_gfn_is_write_protected(info, gfn + i))
                        kvmgt_protect_table_del(info, gfn + i);
        }
-       mutex_unlock(&info->gfn_lock);
 }


Thanks
Yan
Sean Christopherson Jan. 3, 2023, 8:43 p.m. UTC | #2
On Wed, Dec 28, 2022, Yan Zhao wrote:
> On Fri, Dec 23, 2022 at 12:57:21AM +0000, Sean Christopherson wrote:
> > Add and use a new mutex, gfn_lock, to protect accesses to the hash table
> > used to track which gfns are write-protected when shadowing the guest's
> > GTT.  This fixes a bug where kvmgt_page_track_write(), which doesn't hold
> > kvm->mmu_lock, could race with intel_gvt_page_track_remove() and trigger
> > a use-after-free.
> > 
> > Fixing kvmgt_page_track_write() by taking kvm->mmu_lock is not an option
> > as mmu_lock is a r/w spinlock, and intel_vgpu_page_track_handler() might
> > sleep when acquiring vgpu->cache_lock deep down the callstack:
> > 
> >   intel_vgpu_page_track_handler()
> >   |
> >   |->  page_track->handler / ppgtt_write_protection_handler()
> >        |
> >        |-> ppgtt_handle_guest_write_page_table_bytes()
> >            |
> >            |->  ppgtt_handle_guest_write_page_table()
> >                 |
> >                 |-> ppgtt_handle_guest_entry_removal()
> >                     |
> >                     |-> ppgtt_invalidate_pte()
> >                         |
> >                         |-> intel_gvt_dma_unmap_guest_page()
> >                             |
> >                             |-> mutex_lock(&vgpu->cache_lock);
> > 
> This gfn_lock could lead to deadlock in below sequence.
> 
> (1) kvm_write_track_add_gfn() to GFN 1
> (2) kvmgt_page_track_write() for GFN 1
> kvmgt_page_track_write()
> |
> |->mutex_lock(&info->vgpu_lock)
> |->intel_vgpu_page_track_handler (as is kvmgt_gfn_is_write_protected)
>    |
>    |->page_track->handler() (ppgtt_write_protection_handler())
>       |	
>       |->ppgtt_handle_guest_write_page_table_bytes()
>          |
>          |->ppgtt_handle_guest_write_page_table()
> 	    |
> 	    |->ppgtt_handle_guest_entry_add() --> new_present
> 	       |
> 	       |->ppgtt_populate_spt_by_guest_entry()
> 	          |
> 		  |->intel_vgpu_enable_page_track() --> for GFN 2
> 		     |
> 		     |->intel_gvt_page_track_add()
> 		        |
> 			|->mutex_lock(&info->gfn_lock) ===>deadlock

Or even more simply, 

  kvmgt_page_track_write()
  |
  -> intel_vgpu_page_track_handler()
     |
     -> intel_gvt_page_track_remove()

> 
> Below fix based on this patch is to reuse vgpu_lock to protect the hash table
> info->ptable.
> Please check if it's good.
> 
> 
> diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
> index b924ed079ad4..526bd973e784 100644
> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> @@ -364,7 +364,7 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
>  {
>         struct kvmgt_pgfn *p, *res = NULL;
> 
> -       lockdep_assert_held(&info->gfn_lock);
> +       lockdep_assert_held(&info->vgpu_lock);
> 
>         hash_for_each_possible(info->ptable, p, hnode, gfn) {
>                 if (gfn == p->gfn) {
> @@ -388,7 +388,7 @@ static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
>  {
>         struct kvmgt_pgfn *p;
> 
> -       lockdep_assert_held(&info->gfn_lock);
> +       lockdep_assert_held(&info->vgpu_lock);

I'll just delete these assertions, the one in __kvmgt_protect_table_find() should
cover everything and is ultimately the assert that matters.

> @@ -1629,12 +1629,11 @@ static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
>         struct intel_vgpu *info =
>                 container_of(node, struct intel_vgpu, track_node);
>  
> -       mutex_lock(&info->gfn_lock);
> +       lockdep_assert_held(&info->vgpu_lock);

This path needs to manually take vgpu_lock as it's called from KVM.  IIRC, this
is the main reason I tried adding a new lock.  That and I had a hell of a time
figuring out whether or not vgpu_lock would actually be held.

Looking at this with fresh eyes, AFAICT intel_vgpu_reset_gtt() is the only other
path that can reach __kvmgt_protect_table_find() without holding vgpu_lock, by
way of intel_gvt_page_track_remove().  But unless there's magic I'm missing, that's
dead code and can simply be deleted.
Yan Zhao Jan. 5, 2023, 12:51 a.m. UTC | #3
On Tue, Jan 03, 2023 at 08:43:17PM +0000, Sean Christopherson wrote:
> On Wed, Dec 28, 2022, Yan Zhao wrote:
> > On Fri, Dec 23, 2022 at 12:57:21AM +0000, Sean Christopherson wrote:
> > > Add and use a new mutex, gfn_lock, to protect accesses to the hash table
> > > used to track which gfns are write-protected when shadowing the guest's
> > > GTT.  This fixes a bug where kvmgt_page_track_write(), which doesn't hold
> > > kvm->mmu_lock, could race with intel_gvt_page_track_remove() and trigger
> > > a use-after-free.
> > > 
> > > Fixing kvmgt_page_track_write() by taking kvm->mmu_lock is not an option
> > > as mmu_lock is a r/w spinlock, and intel_vgpu_page_track_handler() might
> > > sleep when acquiring vgpu->cache_lock deep down the callstack:
> > > 
> > >   intel_vgpu_page_track_handler()
> > >   |
> > >   |->  page_track->handler / ppgtt_write_protection_handler()
> > >        |
> > >        |-> ppgtt_handle_guest_write_page_table_bytes()
> > >            |
> > >            |->  ppgtt_handle_guest_write_page_table()
> > >                 |
> > >                 |-> ppgtt_handle_guest_entry_removal()
> > >                     |
> > >                     |-> ppgtt_invalidate_pte()
> > >                         |
> > >                         |-> intel_gvt_dma_unmap_guest_page()
> > >                             |
> > >                             |-> mutex_lock(&vgpu->cache_lock);
> > > 
> > This gfn_lock could lead to deadlock in below sequence.
> > 
> > (1) kvm_write_track_add_gfn() to GFN 1
> > (2) kvmgt_page_track_write() for GFN 1
> > kvmgt_page_track_write()
> > |
> > |->mutex_lock(&info->vgpu_lock)
> > |->intel_vgpu_page_track_handler (as is kvmgt_gfn_is_write_protected)
> >    |
> >    |->page_track->handler() (ppgtt_write_protection_handler())
> >       |	
> >       |->ppgtt_handle_guest_write_page_table_bytes()
> >          |
> >          |->ppgtt_handle_guest_write_page_table()
> > 	    |
> > 	    |->ppgtt_handle_guest_entry_add() --> new_present
> > 	       |
> > 	       |->ppgtt_populate_spt_by_guest_entry()
> > 	          |
> > 		  |->intel_vgpu_enable_page_track() --> for GFN 2
> > 		     |
> > 		     |->intel_gvt_page_track_add()
> > 		        |
> > 			|->mutex_lock(&info->gfn_lock) ===>deadlock
> 
> Or even more simply, 
> 
>   kvmgt_page_track_write()
>   |
>   -> intel_vgpu_page_track_handler()
>      |
>      -> intel_gvt_page_track_remove()
>
yes.

> > 
> > Below fix based on this patch is to reuse vgpu_lock to protect the hash table
> > info->ptable.
> > Please check if it's good.
> > 
> > 
> > diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
> > index b924ed079ad4..526bd973e784 100644
> > --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> > +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> > @@ -364,7 +364,7 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
> >  {
> >         struct kvmgt_pgfn *p, *res = NULL;
> > 
> > -       lockdep_assert_held(&info->gfn_lock);
> > +       lockdep_assert_held(&info->vgpu_lock);
> > 
> >         hash_for_each_possible(info->ptable, p, hnode, gfn) {
> >                 if (gfn == p->gfn) {
> > @@ -388,7 +388,7 @@ static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
> >  {
> >         struct kvmgt_pgfn *p;
> > 
> > -       lockdep_assert_held(&info->gfn_lock);
> > +       lockdep_assert_held(&info->vgpu_lock);
> 
> I'll just delete these assertions, the one in __kvmgt_protect_table_find() should
> cover everything and is ultimately the assert that matters.
> 
> > @@ -1629,12 +1629,11 @@ static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
> >         struct intel_vgpu *info =
> >                 container_of(node, struct intel_vgpu, track_node);
> >  
> > -       mutex_lock(&info->gfn_lock);
> > +       lockdep_assert_held(&info->vgpu_lock);
> 
> This path needs to manually take vgpu_lock as it's called from KVM.  IIRC, this
> is the main reason I tried adding a new lock.  That and I had a hell of a time
> figuring out whether or not vgpu_lock would actually be held.
Right. In the path of kvmgt_page_track_remove_region(),
mutex_lock(&info->vgpu_lock) and  mutex_unlock(&info->vgpu_lock) are
required.

static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
                                           struct kvm_page_track_notifier_node *node)
{
        unsigned long i;
        struct intel_vgpu *info =
                container_of(node, struct intel_vgpu, track_node);

        mutex_lock(&info->vgpu_lock);
        for (i = 0; i < nr_pages; i++) {
                if (kvmgt_gfn_is_write_protected(info, gfn + i))
                        kvmgt_protect_table_del(info, gfn + i);
        }
        mutex_unlock(&info->vgpu_lock);
}

The reason I previously could have lockdep_assert_held(&info->vgpu_lock) passed
is that I didn't get LOCKDEP configured, so it's basically a void.
(sorry, though I actually also called mutex_is_locked(&info->vgpu_lock)
in some paths to check lockdep_assert_held() worked properly. But it's my
fault not to double check it's compiled correctly).


> 
> Looking at this with fresh eyes, AFAICT intel_vgpu_reset_gtt() is the only other
> path that can reach __kvmgt_protect_table_find() without holding vgpu_lock, by
> way of intel_gvt_page_track_remove().  But unless there's magic I'm missing, that's
> dead code and can simply be deleted.
Yes, I found intel_vgpu_reset_gtt() has not been called since
ba25d977571e1551b7032d6104e49efd6f88f8ad.
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index dbf8d7470b2c..fbfd7eafec14 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -176,6 +176,7 @@  struct intel_vgpu {
 	struct vfio_device vfio_device;
 	struct intel_gvt *gvt;
 	struct mutex vgpu_lock;
+	struct mutex gfn_lock;
 	int id;
 	bool active;
 	bool attached;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index ca9926061cd8..a4747e153dad 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -366,6 +366,8 @@  __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
 {
 	struct kvmgt_pgfn *p, *res = NULL;
 
+	lockdep_assert_held(&info->gfn_lock);
+
 	hash_for_each_possible(info->ptable, p, hnode, gfn) {
 		if (gfn == p->gfn) {
 			res = p;
@@ -388,6 +390,8 @@  static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
 {
 	struct kvmgt_pgfn *p;
 
+	lockdep_assert_held(&info->gfn_lock);
+
 	if (kvmgt_gfn_is_write_protected(info, gfn))
 		return;
 
@@ -1563,60 +1567,68 @@  int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
 {
 	struct kvm *kvm = info->vfio_device.kvm;
 	struct kvm_memory_slot *slot;
-	int idx;
+	int idx, ret = 0;
 
 	if (!info->attached)
 		return -ESRCH;
 
+	mutex_lock(&info->gfn_lock);
+
+	if (kvmgt_gfn_is_write_protected(info, gfn))
+		goto out;
+
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot) {
 		srcu_read_unlock(&kvm->srcu, idx);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	write_lock(&kvm->mmu_lock);
-
-	if (kvmgt_gfn_is_write_protected(info, gfn))
-		goto out;
-
 	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+	write_unlock(&kvm->mmu_lock);
+
+	srcu_read_unlock(&kvm->srcu, idx);
+
 	kvmgt_protect_table_add(info, gfn);
-
 out:
-	write_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-	return 0;
+	mutex_unlock(&info->gfn_lock);
+	return ret;
 }
 
 int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
 {
 	struct kvm *kvm = info->vfio_device.kvm;
 	struct kvm_memory_slot *slot;
-	int idx;
+	int idx, ret = 0;
 
 	if (!info->attached)
 		return 0;
 
-	idx = srcu_read_lock(&kvm->srcu);
-	slot = gfn_to_memslot(kvm, gfn);
-	if (!slot) {
-		srcu_read_unlock(&kvm->srcu, idx);
-		return -EINVAL;
-	}
-
-	write_lock(&kvm->mmu_lock);
+	mutex_lock(&info->gfn_lock);
 
 	if (!kvmgt_gfn_is_write_protected(info, gfn))
 		goto out;
 
+	idx = srcu_read_lock(&kvm->srcu);
+	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	write_lock(&kvm->mmu_lock);
 	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+	write_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
 	kvmgt_protect_table_del(info, gfn);
 
 out:
-	write_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-	return 0;
+	mutex_unlock(&info->gfn_lock);
+	return ret;
 }
 
 static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1627,11 +1639,13 @@  static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		container_of(node, struct intel_vgpu, track_node);
 
 	mutex_lock(&info->vgpu_lock);
+	mutex_lock(&info->gfn_lock);
 
 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
 		intel_vgpu_page_track_handler(info, gpa,
 						     (void *)val, len);
 
+	mutex_unlock(&info->gfn_lock);
 	mutex_unlock(&info->vgpu_lock);
 }
 
@@ -1644,16 +1658,19 @@  static void kvmgt_page_track_flush_slot(struct kvm *kvm,
 	struct intel_vgpu *info =
 		container_of(node, struct intel_vgpu, track_node);
 
-	write_lock(&kvm->mmu_lock);
+	mutex_lock(&info->gfn_lock);
 	for (i = 0; i < slot->npages; i++) {
 		gfn = slot->base_gfn + i;
 		if (kvmgt_gfn_is_write_protected(info, gfn)) {
+			write_lock(&kvm->mmu_lock);
 			kvm_slot_page_track_remove_page(kvm, slot, gfn,
 						KVM_PAGE_TRACK_WRITE);
+			write_unlock(&kvm->mmu_lock);
+
 			kvmgt_protect_table_del(info, gfn);
 		}
 	}
-	write_unlock(&kvm->mmu_lock);
+	mutex_unlock(&info->gfn_lock);
 }
 
 void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 56c71474008a..f2479781b770 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -277,6 +277,7 @@  struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
 	vgpu->id = IDLE_VGPU_IDR;
 	vgpu->gvt = gvt;
 	mutex_init(&vgpu->vgpu_lock);
+	mutex_init(&vgpu->gfn_lock);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++)
 		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);