
[v2,1/2] KVM: x86/mmu: Ensure TDP MMU roots are freed after yield

Message ID 20210106185951.2966575-1-bgardon@google.com (mailing list archive)
State New, archived
Series [v2,1/2] KVM: x86/mmu: Ensure TDP MMU roots are freed after yield

Commit Message

Ben Gardon Jan. 6, 2021, 6:59 p.m. UTC
Many TDP MMU functions which need to perform some action on all TDP MMU
roots hold a reference on that root so that they can safely drop the MMU
lock in order to yield to other threads. However, when releasing the
reference on the root, there is a bug: the root will not be freed even
if its reference count (root_count) is reduced to 0.

To simplify acquiring and releasing references on TDP MMU root pages, and
to ensure that these roots are properly freed, move the get/put operations
into the TDP MMU root iterator macro. Not all functions which use the macro
currently get and put a reference to the root, but adding this behavior is
harmless.

Moving the get/put operations into the iterator macro also helps
simplify control flow when a root does need to be freed. Note that using
the list_for_each_entry_unsafe macro would not have been appropriate in
this situation because it could keep a reference to the next root across
an MMU lock release + reacquire.
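
For reference, a sketch (reconstructed for illustration, not code from the
patch) of what kvm_tdp_mmu_zap_gfn_range() would look like with that iterator
(i.e. list_for_each_entry_safe()), and why the cached next pointer is a
problem:

        bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
        {
                struct kvm_mmu_page *root, *next;
                bool flush = false;

                list_for_each_entry_safe(root, next, &kvm->arch.tdp_mmu_roots, link) {
                        /*
                         * 'next' is sampled before this body runs.  If
                         * zap_gfn_range() drops the MMU lock to yield, another
                         * thread can free and unlink the root 'next' points
                         * at, leaving it dangling when the loop advances.
                         */
                        flush |= zap_gfn_range(kvm, root, start, end, true);
                }

                return flush;
        }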

Reported-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Fixes: faaf05b00aec ("kvm: x86/mmu: Support zapping SPTEs in the TDP MMU")
Fixes: 063afacd8730 ("kvm: x86/mmu: Support invalidate range MMU notifier for TDP MMU")
Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
Fixes: 14881998566d ("kvm: x86/mmu: Support disabling dirty logging for the tdp MMU")
Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 97 +++++++++++++++++---------------------
 1 file changed, 44 insertions(+), 53 deletions(-)

Comments

Ben Gardon Jan. 6, 2021, 7:03 p.m. UTC | #1
On Wed, Jan 6, 2021 at 10:59 AM Ben Gardon <bgardon@google.com> wrote:
>
> Many TDP MMU functions which need to perform some action on all TDP MMU
> roots hold a reference on that root so that they can safely drop the MMU
> lock in order to yield to other threads. However, when releasing the
> reference on the root, there is a bug: the root will not be freed even
> if its reference count (root_count) is reduced to 0.
>
> To simplify acquiring and releasing references on TDP MMU root pages, and
> to ensure that these roots are properly freed, move the get/put operations
> into the TDP MMU root iterator macro. Not all functions which use the macro
> currently get and put a reference to the root, but adding this behavior is
> harmless.
>
> Moving the get/put operations into the iterator macro also helps
> simplify control flow when a root does need to be freed. Note that using
> the list_for_each_entry_unsafe macro would not have been appropriate in
> this situation because it could keep a reference to the next root across
> an MMU lock release + reacquire.
>
> Reported-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Fixes: faaf05b00aec ("kvm: x86/mmu: Support zapping SPTEs in the TDP MMU")
> Fixes: 063afacd8730 ("kvm: x86/mmu: Support invalidate range MMU notifier for TDP MMU")
> Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
> Fixes: 14881998566d ("kvm: x86/mmu: Support disabling dirty logging for the tdp MMU")
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 97 +++++++++++++++++---------------------
>  1 file changed, 44 insertions(+), 53 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 75db27fda8f3..6e076b66973c 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -44,8 +44,44 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
>         WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
>  }
>
> -#define for_each_tdp_mmu_root(_kvm, _root)                         \
> -       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
> +static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
> +{
> +       if (kvm_mmu_put_root(kvm, root))
> +               kvm_tdp_mmu_free_root(kvm, root);
> +}
> +
> +static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
> +                                          struct kvm_mmu_page *root)
> +{
> +       if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
> +               return false;
> +
> +       kvm_mmu_get_root(kvm, root);
> +       return true;
> +
> +}
> +
> +static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> +                                                    struct kvm_mmu_page *root)
> +{
> +       struct kvm_mmu_page *next_root;
> +
> +       next_root = list_next_entry(root, link);
> +       tdp_mmu_put_root(kvm, root);
> +       return next_root;
> +}
> +
> +/*
> + * Note: this iterator gets and puts references to the roots it iterates over.
> + * This makes it safe to release the MMU lock and yield within the loop, but
> + * if exiting the loop early, the caller must drop the reference to the most
> + * recent root. (Unless keeping a live reference is desirable.)
> + */
> +#define for_each_tdp_mmu_root(_kvm, _root)                             \
> +       for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,        \
> +                                     typeof(*_root), link);            \
> +            tdp_mmu_next_root_valid(_kvm, _root);                      \
> +            _root = tdp_mmu_next_root(_kvm, _root))
>
>  bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
>  {
> @@ -128,7 +164,11 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
>         /* Check for an existing root before allocating a new one. */
>         for_each_tdp_mmu_root(kvm, root) {
>                 if (root->role.word == role.word) {
> -                       kvm_mmu_get_root(kvm, root);
> +                       /*
> +                        * The iterator already acquired a reference to this
> +                        * root, so simply return early without dropping the
> +                        * reference.
> +                        */
>                         spin_unlock(&kvm->mmu_lock);
>                         return root;
>                 }
> @@ -447,18 +487,9 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
>         struct kvm_mmu_page *root;
>         bool flush = false;
>
> -       for_each_tdp_mmu_root(kvm, root) {
> -               /*
> -                * Take a reference on the root so that it cannot be freed if
> -                * this thread releases the MMU lock and yields in this loop.
> -                */
> -               kvm_mmu_get_root(kvm, root);
> -
> +       for_each_tdp_mmu_root(kvm, root)
>                 flush |= zap_gfn_range(kvm, root, start, end, true);
>
> -               kvm_mmu_put_root(kvm, root);
> -       }
> -
>         return flush;
>  }
>
> @@ -620,12 +651,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
>         int as_id;
>
>         for_each_tdp_mmu_root(kvm, root) {
> -               /*
> -                * Take a reference on the root so that it cannot be freed if
> -                * this thread releases the MMU lock and yields in this loop.
> -                */
> -               kvm_mmu_get_root(kvm, root);
> -
>                 as_id = kvm_mmu_page_as_id(root);
>                 slots = __kvm_memslots(kvm, as_id);
>                 kvm_for_each_memslot(memslot, slots) {
> @@ -647,8 +672,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
>                         ret |= handler(kvm, memslot, root, gfn_start,
>                                        gfn_end, data);
>                 }
> -
> -               kvm_mmu_put_root(kvm, root);
>         }
>
>         return ret;
> @@ -843,16 +866,8 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
>                 if (root_as_id != slot->as_id)
>                         continue;
>
> -               /*
> -                * Take a reference on the root so that it cannot be freed if
> -                * this thread releases the MMU lock and yields in this loop.
> -                */
> -               kvm_mmu_get_root(kvm, root);
> -
>                 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
>                              slot->base_gfn + slot->npages, min_level);
> -
> -               kvm_mmu_put_root(kvm, root);
>         }
>
>         return spte_set;
> @@ -911,16 +926,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
>                 if (root_as_id != slot->as_id)
>                         continue;
>
> -               /*
> -                * Take a reference on the root so that it cannot be freed if
> -                * this thread releases the MMU lock and yields in this loop.
> -                */
> -               kvm_mmu_get_root(kvm, root);
> -
>                 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
>                                 slot->base_gfn + slot->npages);
> -
> -               kvm_mmu_put_root(kvm, root);
>         }
>
>         return spte_set;
> @@ -1034,16 +1041,8 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
>                 if (root_as_id != slot->as_id)
>                         continue;
>
> -               /*
> -                * Take a reference on the root so that it cannot be freed if
> -                * this thread releases the MMU lock and yields in this loop.
> -                */
> -               kvm_mmu_get_root(kvm, root);
> -
>                 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
>                                 slot->base_gfn + slot->npages);
> -
> -               kvm_mmu_put_root(kvm, root);
>         }
>         return spte_set;
>  }
> @@ -1094,16 +1093,8 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
>                 if (root_as_id != slot->as_id)
>                         continue;
>
> -               /*
> -                * Take a reference on the root so that it cannot be freed if
> -                * this thread releases the MMU lock and yields in this loop.
> -                */
> -               kvm_mmu_get_root(kvm, root);
> -
>                 zap_collapsible_spte_range(kvm, root, slot->base_gfn,
>                                            slot->base_gfn + slot->npages);
> -
> -               kvm_mmu_put_root(kvm, root);
>         }
>  }
>
> --
> 2.29.2.729.g45daf8777d-goog
>

I tested v2 with Maciej's test
(https://gist.github.com/maciejsszmigiero/890218151c242d99f63ea0825334c6c0,
near the bottom of the page) on an Intel Skylake machine and can
confirm that v1 failed the test but v2 passes. The problem with v1 was
that roots were being removed from the list before list_next_entry was
called, resulting in a bad value.
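
The ordering that matters is in tdp_mmu_next_root(); a rough sketch of the
difference (the v1 ordering below is reconstructed from the description above,
not quoted from v1):

        /* v2 (this patch): sample the next entry while the current root is
         * still on the list, then drop the reference.
         */
        next_root = list_next_entry(root, link);
        tdp_mmu_put_root(kvm, root);

        /* v1 (buggy, reconstructed): dropping the reference first can free and
         * unlink the current root, so list_next_entry() then reads through a
         * stale entry.
         */
        tdp_mmu_put_root(kvm, root);
        next_root = list_next_entry(root, link);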
Sean Christopherson Jan. 6, 2021, 9:29 p.m. UTC | #2
On Wed, Jan 06, 2021, Ben Gardon wrote:
> Many TDP MMU functions which need to perform some action on all TDP MMU
> roots hold a reference on that root so that they can safely drop the MMU
> lock in order to yield to other threads. However, when releasing the
> reference on the root, there is a bug: the root will not be freed even
> if its reference count (root_count) is reduced to 0.
> 
> To simplify acquiring and releasing references on TDP MMU root pages, and
> to ensure that these roots are properly freed, move the get/put operations
> into the TDP MMU root iterator macro. Not all functions which use the macro
> currently get and put a reference to the root, but adding this behavior is
> harmless.

I wouldn't say it's harmless; it creates the potential for refcount leaks where
they otherwise wouldn't be possible (the early loop exit scenario).  Not saying
this is the wrong approach, just that it's not without downsides.

Maybe preemptively add tdp_mmu_root_iter_break(), which would just be a wrapper
around kvm_mmu_put_root() but might help readability (if it's ever needed)?
Not sure that's a good idea; someone will probably just remove the dead code in
the future :-)
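
A hypothetical sketch of such a helper (the name comes from the suggestion
above; wrapping tdp_mmu_put_root() rather than kvm_mmu_put_root() is an
assumption, so that a root whose last reference is dropped still gets freed):

        static inline void tdp_mmu_root_iter_break(struct kvm *kvm,
                                                   struct kvm_mmu_page *root)
        {
                /* Drop the reference the iterator took on the current root. */
                tdp_mmu_put_root(kvm, root);
        }

A caller that breaks out of for_each_tdp_mmu_root() early and does not want to
keep the root alive would call this on the current root before exiting the
loop.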

> Moving the get/put operations into the iterator macro also helps
> simplify control flow when a root does need to be freed. Note that using
> the list_for_each_entry_unsafe macro would not have been appropriate in

s/list_for_each_entry_unsafe/list_for_each_entry_safe

> this situation because it could keep a reference to the next root across
> an MMU lock release + reacquire.

Use of "reference" is a confusing; above it means refcounts, here it means a
pointer _without_ an elevated refcount.  Something like this?

  ... would not have been apprporiate in this situation because it could keep
  a pointer to the next root across an MMU lock release + reacquire without
  pinning the next root.

> Reported-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Fixes: faaf05b00aec ("kvm: x86/mmu: Support zapping SPTEs in the TDP MMU")
> Fixes: 063afacd8730 ("kvm: x86/mmu: Support invalidate range MMU notifier for TDP MMU")
> Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
> Fixes: 14881998566d ("kvm: x86/mmu: Support disabling dirty logging for the tdp MMU")
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 97 +++++++++++++++++---------------------
>  1 file changed, 44 insertions(+), 53 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 75db27fda8f3..6e076b66973c 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -44,8 +44,44 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
>  	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
>  }
>  
> -#define for_each_tdp_mmu_root(_kvm, _root)			    \
> -	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
> +static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
> +{
> +	if (kvm_mmu_put_root(kvm, root))
> +		kvm_tdp_mmu_free_root(kvm, root);
> +}
> +
> +static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
> +					   struct kvm_mmu_page *root)
> +{

Maybe add lockdep annotations here?  A couple callers already have 'em.
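
E.g. something like this at the top of the helper (a sketch; mmu_lock is the
lock being asserted, which lockdep_assert_held() handles for spinlocks):

        lockdep_assert_held(&kvm->mmu_lock);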

> +	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
> +		return false;
> +
> +	kvm_mmu_get_root(kvm, root);
> +	return true;
> +
> +}
> +
> +static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> +						     struct kvm_mmu_page *root)
> +{
> +	struct kvm_mmu_page *next_root;
> +
> +	next_root = list_next_entry(root, link);
> +	tdp_mmu_put_root(kvm, root);
> +	return next_root;
> +}
> +
> +/*
> + * Note: this iterator gets and puts references to the roots it iterates over.
> + * This makes it safe to release the MMU lock and yield within the loop, but
> + * if exiting the loop early, the caller must drop the reference to the most
> + * recent root. (Unless keeping a live reference is desirable.)
> + */
> +#define for_each_tdp_mmu_root(_kvm, _root)				\
> +	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
> +				      typeof(*_root), link);		\
> +	     tdp_mmu_next_root_valid(_kvm, _root);			\
> +	     _root = tdp_mmu_next_root(_kvm, _root))
>  
>  bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
>  {
> @@ -128,7 +164,11 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
>  	/* Check for an existing root before allocating a new one. */
>  	for_each_tdp_mmu_root(kvm, root) {
>  		if (root->role.word == role.word) {
> -			kvm_mmu_get_root(kvm, root);
> +			/*
> +			 * The iterator already acquired a reference to this
> +			 * root, so simply return early without dropping the
> +			 * reference.
> +			 */
>  			spin_unlock(&kvm->mmu_lock);

I vote to open code use of list_for_each_entry() for this one specific case;
it's very much a one-off flow (relative to the other iteration scenarios).
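
Roughly, a sketch of that open-coded lookup (based on the pre-patch flow; not a
tested replacement):

        list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
                if (root->role.word == role.word) {
                        /* Take a reference explicitly, since this loop does not. */
                        kvm_mmu_get_root(kvm, root);
                        spin_unlock(&kvm->mmu_lock);
                        return root;
                }
        }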

>  			return root;
>  		}
Maciej S. Szmigiero Jan. 6, 2021, 10:13 p.m. UTC | #3
On 06.01.2021 20:03, Ben Gardon wrote:
> On Wed, Jan 6, 2021 at 10:59 AM Ben Gardon <bgardon@google.com> wrote:
>>
>> Many TDP MMU functions which need to perform some action on all TDP MMU
>> roots hold a reference on that root so that they can safely drop the MMU
>> lock in order to yield to other threads. However, when releasing the
>> reference on the root, there is a bug: the root will not be freed even
>> if its reference count (root_count) is reduced to 0.
>>
>> To simplify acquiring and releasing references on TDP MMU root pages, and
>> to ensure that these roots are properly freed, move the get/put operations
>> into the TDP MMU root iterator macro. Not all functions which use the macro
>> currently get and put a reference to the root, but adding this behavior is
>> harmless.
>>
>> Moving the get/put operations into the iterator macro also helps
>> simplify control flow when a root does need to be freed. Note that using
>> the list_for_each_entry_unsafe macro would not have been appropriate in
>> this situation because it could keep a reference to the next root across
>> an MMU lock release + reacquire.
>>
>> Reported-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
>> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
>> Fixes: faaf05b00aec ("kvm: x86/mmu: Support zapping SPTEs in the TDP MMU")
>> Fixes: 063afacd8730 ("kvm: x86/mmu: Support invalidate range MMU notifier for TDP MMU")
>> Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
>> Fixes: 14881998566d ("kvm: x86/mmu: Support disabling dirty logging for the tdp MMU")
>> Signed-off-by: Ben Gardon <bgardon@google.com>
>> ---
(..)
> I tested v2 with Maciej's test
> (https://gist.github.com/maciejsszmigiero/890218151c242d99f63ea0825334c6c0,
> near the bottom of the page) on an Intel Skylake machine and can
> confirm that v1 failed the test but v2 passes. The problem with v1 was
> that roots were being removed from the list before list_next_entry was
> called, resulting in a bad value.
> 

I've tested the fix now and can confirm, too, that I can no longer
observe any crash.

Thanks,
Maciej

Patch

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 75db27fda8f3..6e076b66973c 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -44,8 +44,44 @@  void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 }
 
-#define for_each_tdp_mmu_root(_kvm, _root)			    \
-	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
+static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+	if (kvm_mmu_put_root(kvm, root))
+		kvm_tdp_mmu_free_root(kvm, root);
+}
+
+static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
+					   struct kvm_mmu_page *root)
+{
+	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
+		return false;
+
+	kvm_mmu_get_root(kvm, root);
+	return true;
+
+}
+
+static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+						     struct kvm_mmu_page *root)
+{
+	struct kvm_mmu_page *next_root;
+
+	next_root = list_next_entry(root, link);
+	tdp_mmu_put_root(kvm, root);
+	return next_root;
+}
+
+/*
+ * Note: this iterator gets and puts references to the roots it iterates over.
+ * This makes it safe to release the MMU lock and yield within the loop, but
+ * if exiting the loop early, the caller must drop the reference to the most
+ * recent root. (Unless keeping a live reference is desirable.)
+ */
+#define for_each_tdp_mmu_root(_kvm, _root)				\
+	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
+				      typeof(*_root), link);		\
+	     tdp_mmu_next_root_valid(_kvm, _root);			\
+	     _root = tdp_mmu_next_root(_kvm, _root))
 
 bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
 {
@@ -128,7 +164,11 @@  static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
 	/* Check for an existing root before allocating a new one. */
 	for_each_tdp_mmu_root(kvm, root) {
 		if (root->role.word == role.word) {
-			kvm_mmu_get_root(kvm, root);
+			/*
+			 * The iterator already acquired a reference to this
+			 * root, so simply return early without dropping the
+			 * reference.
+			 */
 			spin_unlock(&kvm->mmu_lock);
 			return root;
 		}
@@ -447,18 +487,9 @@  bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
 	struct kvm_mmu_page *root;
 	bool flush = false;
 
-	for_each_tdp_mmu_root(kvm, root) {
-		/*
-		 * Take a reference on the root so that it cannot be freed if
-		 * this thread releases the MMU lock and yields in this loop.
-		 */
-		kvm_mmu_get_root(kvm, root);
-
+	for_each_tdp_mmu_root(kvm, root)
 		flush |= zap_gfn_range(kvm, root, start, end, true);
 
-		kvm_mmu_put_root(kvm, root);
-	}
-
 	return flush;
 }
 
@@ -620,12 +651,6 @@  static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
 	int as_id;
 
 	for_each_tdp_mmu_root(kvm, root) {
-		/*
-		 * Take a reference on the root so that it cannot be freed if
-		 * this thread releases the MMU lock and yields in this loop.
-		 */
-		kvm_mmu_get_root(kvm, root);
-
 		as_id = kvm_mmu_page_as_id(root);
 		slots = __kvm_memslots(kvm, as_id);
 		kvm_for_each_memslot(memslot, slots) {
@@ -647,8 +672,6 @@  static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
 			ret |= handler(kvm, memslot, root, gfn_start,
 				       gfn_end, data);
 		}
-
-		kvm_mmu_put_root(kvm, root);
 	}
 
 	return ret;
@@ -843,16 +866,8 @@  bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
 		if (root_as_id != slot->as_id)
 			continue;
 
-		/*
-		 * Take a reference on the root so that it cannot be freed if
-		 * this thread releases the MMU lock and yields in this loop.
-		 */
-		kvm_mmu_get_root(kvm, root);
-
 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
 			     slot->base_gfn + slot->npages, min_level);
-
-		kvm_mmu_put_root(kvm, root);
 	}
 
 	return spte_set;
@@ -911,16 +926,8 @@  bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
 		if (root_as_id != slot->as_id)
 			continue;
 
-		/*
-		 * Take a reference on the root so that it cannot be freed if
-		 * this thread releases the MMU lock and yields in this loop.
-		 */
-		kvm_mmu_get_root(kvm, root);
-
 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
 				slot->base_gfn + slot->npages);
-
-		kvm_mmu_put_root(kvm, root);
 	}
 
 	return spte_set;
@@ -1034,16 +1041,8 @@  bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
 		if (root_as_id != slot->as_id)
 			continue;
 
-		/*
-		 * Take a reference on the root so that it cannot be freed if
-		 * this thread releases the MMU lock and yields in this loop.
-		 */
-		kvm_mmu_get_root(kvm, root);
-
 		spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
 				slot->base_gfn + slot->npages);
-
-		kvm_mmu_put_root(kvm, root);
 	}
 	return spte_set;
 }
@@ -1094,16 +1093,8 @@  void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 		if (root_as_id != slot->as_id)
 			continue;
 
-		/*
-		 * Take a reference on the root so that it cannot be freed if
-		 * this thread releases the MMU lock and yields in this loop.
-		 */
-		kvm_mmu_get_root(kvm, root);
-
 		zap_collapsible_spte_range(kvm, root, slot->base_gfn,
 					   slot->base_gfn + slot->npages);
-
-		kvm_mmu_put_root(kvm, root);
 	}
 }