@@ -6187,6 +6187,9 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
kvm_zap_obsolete_pages(kvm);
+ if (tdp_mmu_enabled)
+ kvm_tdp_mmu_zap_exported_roots(kvm);
+
write_unlock(&kvm->mmu_lock);
/*
@@ -897,12 +897,38 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
read_unlock(&kvm->mmu_lock);
}
+void kvm_tdp_mmu_zap_exported_roots(struct kvm *kvm)
+{
+#ifdef CONFIG_HAVE_KVM_EXPORTED_TDP
+ struct kvm_mmu_page *root;
+ bool flush;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
+ if (!root->exported)
+ continue;
+
+ flush = tdp_mmu_zap_leafs(kvm, root, 0, -1ULL, false, false);
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+ }
+
+ rcu_read_unlock();
+#endif
+}
+
/*
- * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
- * is about to be zapped, e.g. in response to a memslots update. The actual
- * zapping is done separately so that it happens with mmu_lock with read,
- * whereas invalidating roots must be done with mmu_lock held for write (unless
- * the VM is being destroyed).
+ * Mark each TDP MMU root (except exported roots) as invalid to prevent
+ * vCPUs from reusing a root that is about to be zapped, e.g. in response
+ * to a memslots update.
+ * The actual zapping is done separately so that it happens with mmu_lock
+ * held for read, whereas invalidating roots must be done with mmu_lock
+ * held for write (unless the VM is being destroyed).
+ * Exported roots are instead zapped in kvm_tdp_mmu_zap_exported_roots(),
+ * with mmu_lock held for write, before the memslot update completes.
*
* Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
* See kvm_tdp_mmu_get_vcpu_root_hpa().
@@ -932,6 +958,10 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
* or get/put references to roots.
*/
list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+#ifdef CONFIG_HAVE_KVM_EXPORTED_TDP
+ if (root->exported)
+ continue;
+#endif
/*
* Note, invalid roots can outlive a memslot update! Invalid
* roots must be *zapped* before the memslot update completes,
@@ -25,6 +25,7 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
+void kvm_tdp_mmu_zap_exported_roots(struct kvm *kvm);
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
Keep exported TDP roots always valid and zap all of their leaf entries
to replace the "root role invalid" operation.

Unlike TDP roots accessed by vCPUs only, an update of a TDP root that is
exported to external components must be done atomically, i.e.:
1. allocate a new root,
2. update and notify the new root to external components,
3. mark the old root invalid.
So it is more efficient to just zap all leaf entries of the exported TDP.

Though zapping all leaf entries makes "fast zap" less fast, since commit
0df9dab891ff ("KVM: x86/mmu: Stop zapping invalidated TDP MMU roots
asynchronously") the zap of a root is anyway required to be done
synchronously in kvm_mmu_zap_all_fast() before memslot removal completes.

Besides, it is also safe to skip invalidating "exported" roots in
kvm_tdp_mmu_invalidate_all_roots() on the kvm_mmu_uninit_tdp_mmu() path:
when the VM is shutting down, the TDP FD holds a reference to the kvm,
so kvm_mmu_uninit_tdp_mmu() --> kvm_tdp_mmu_invalidate_all_roots() cannot
run until the TDP root has been unmarked as "exported" and put. All child
entries are also zapped before the root is put.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
 arch/x86/kvm/mmu/mmu.c     |  3 +++
 arch/x86/kvm/mmu/tdp_mmu.c | 40 +++++++++++++++++++++++++++++++++-----
 arch/x86/kvm/mmu/tdp_mmu.h |  1 +
 3 files changed, 39 insertions(+), 5 deletions(-)
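For reference, the atomic root-replacement sequence described above, which
this patch avoids for exported roots, would look roughly like the sketch
below. tdp_mmu_alloc_root() and tdp_fd_notify_new_root() are hypothetical
helper names used purely for illustration and are not part of this series;
only the root->exported flag and role.invalid bit are real fields here.

/*
 * Illustrative sketch only, not part of this patch: replacing an
 * exported root the way invalidation-based zapping would require.
 * The three steps mirror the commit message; the helpers called in
 * steps 1 and 2 are hypothetical.
 */
static int tdp_mmu_replace_exported_root(struct kvm *kvm,
					 struct kvm_mmu_page *old_root)
{
	struct kvm_mmu_page *new_root;

	/* 1. Allocate a new root. */
	new_root = tdp_mmu_alloc_root(kvm);
	if (!new_root)
		return -ENOMEM;

	/* 2. Update and notify the new root to external components. */
	tdp_fd_notify_new_root(kvm, new_root);

	/* 3. Only now can the old root be marked invalid and zapped. */
	old_root->role.invalid = true;

	return 0;
}

By contrast, kvm_tdp_mmu_zap_exported_roots() keeps the root itself (and
thus whatever external components hold onto it) stable, and only drops the
leaf mappings beneath it, all within a single mmu_lock write-side critical
section.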