
kvm: x86 mmu: avoid mmu_page_hash lookup for direct_map-only VM

Message ID 20180806225414.137991-1-pshier@google.com

Commit Message

Peter Shier Aug. 6, 2018, 10:54 p.m. UTC
From: Peter Feiner <pfeiner@google.com>

Avoid lookups in mmu_page_hash when the VM uses only direct roots.
When there's a single direct root, a shadow page has at most one
parent SPTE (non-root SPs have exactly one; the root has none). Thus,
if an SPTE is non-present, it can be linked to a newly allocated SP
without first checking whether that SP already exists in the hash.

Signed-off-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Peter Shier <pshier@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/include/asm/kvm_host.h | 13 ++++++++
 arch/x86/kvm/mmu.c              | 55 +++++++++++++++++++--------------
 2 files changed, 45 insertions(+), 23 deletions(-)
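
To make the gating condition concrete, here is a minimal standalone
sketch (not kernel code; struct, field, and function names are
illustrative stand-ins) of when the mmu_page_hash lookup is still
required:

/* Sketch only: vm_state stands in for struct kvm_arch, and
 * may_have_multiple_parents mirrors the new
 * shadow_page_may_have_multiple_parents flag. */
#include <stdbool.h>
#include <stdio.h>

struct vm_state {
	bool may_have_multiple_parents;	/* any indirect root ever allocated */
	int root_level;			/* paging level of the root SP */
};

/* True when kvm_mmu_get_page() must still search the hash. */
static bool need_hash_lookup(const struct vm_state *vm, int level)
{
	/* Roots have no parent SPTE to be found through, so they
	 * always take the lookup path; non-root SPs can skip it
	 * while every root is direct. */
	return vm->may_have_multiple_parents || level == vm->root_level;
}

int main(void)
{
	struct vm_state vm = { .may_have_multiple_parents = false,
			       .root_level = 4 };

	printf("non-root SP, direct-only VM: %d\n",
	       need_hash_lookup(&vm, 1));	/* 0: allocate without searching */
	printf("root SP:                     %d\n",
	       need_hash_lookup(&vm, 4));	/* 1: roots keep the lookup */

	vm.may_have_multiple_parents = true;
	printf("non-root SP, indirect roots: %d\n",
	       need_hash_lookup(&vm, 1));	/* 1: full lookup again */
	return 0;
}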

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c18958ef17d2c..b214788397b7f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -800,6 +800,19 @@ struct kvm_arch {
 	struct kvm_page_track_notifier_node mmu_sp_tracker;
 	struct kvm_page_track_notifier_head track_notifier_head;
 
+	/*
+	 * Optimization for avoiding lookups in mmu_page_hash. When there's a
+	 * single direct root, a shadow page has at most one parent SPTE
+	 * (non-root SPs have exactly one; the root has none). Thus, if an SPTE
+	 * is non-present, it can be linked to a newly allocated SP without
+	 * first checking if the SP already exists.
+	 *
+	 * False initially because there are no indirect roots.
+	 *
+	 * Guarded by mmu_lock.
+	 */
+	bool shadow_page_may_have_multiple_parents;
+
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
 	bool iommu_noncoherent;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f5aef52b148bf..7307cf76cddc8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2343,35 +2343,40 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_valid_sp(vcpu->kvm, sp, gfn) {
-		if (sp->gfn != gfn) {
-			collisions++;
-			continue;
-		}
 
-		if (!need_sync && sp->unsync)
-			need_sync = true;
+	if (vcpu->kvm->arch.shadow_page_may_have_multiple_parents ||
+	    level == vcpu->arch.mmu.root_level) {
+		for_each_valid_sp(vcpu->kvm, sp, gfn) {
+			if (sp->gfn != gfn) {
+				collisions++;
+				continue;
+			}
 
-		if (sp->role.word != role.word)
-			continue;
+			if (!need_sync && sp->unsync)
+				need_sync = true;
 
-		if (sp->unsync) {
-			/* The page is good, but __kvm_sync_page might still end
-			 * up zapping it.  If so, break in order to rebuild it.
-			 */
-			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
-				break;
+			if (sp->role.word != role.word)
+				continue;
 
-			WARN_ON(!list_empty(&invalid_list));
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-		}
+			if (sp->unsync) {
+				/* The page is good, but __kvm_sync_page might
+				 * still end up zapping it.  If so, break in
+				 * order to rebuild it.
+				 */
+				if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+					break;
 
-		if (sp->unsync_children)
-			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+				WARN_ON(!list_empty(&invalid_list));
+				kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			}
 
-		__clear_sp_write_flooding_count(sp);
-		trace_kvm_mmu_get_page(sp, false);
-		goto out;
+			if (sp->unsync_children)
+				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+
+			__clear_sp_write_flooding_count(sp);
+			trace_kvm_mmu_get_page(sp, false);
+			goto out;
+		}
 	}
 
 	++vcpu->kvm->stat.mmu_cache_miss;
@@ -3542,6 +3547,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn;
 	int i;
 
+	spin_lock(&vcpu->kvm->mmu_lock);
+	vcpu->kvm->arch.shadow_page_may_have_multiple_parents = true;
+	spin_unlock(&vcpu->kvm->mmu_lock);
+
 	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
 
 	if (mmu_check_root(vcpu, root_gfn))
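
The flag update in mmu_alloc_shadow_roots() above is one-way: the
function only runs when the VM allocates an indirect root, and nothing
ever clears the flag again. A standalone sketch of that sticky,
lock-guarded flip (a pthread mutex standing in for mmu_lock; names are
illustrative):

/* Sketch only: the mutex stands in for kvm->mmu_lock, which also
 * protects readers of the flag in the SP-allocation path. */
#include <pthread.h>
#include <stdbool.h>

struct vm_state {
	pthread_mutex_t mmu_lock;
	bool shadow_page_may_have_multiple_parents;
};

static void on_alloc_indirect_root(struct vm_state *vm)
{
	pthread_mutex_lock(&vm->mmu_lock);
	vm->shadow_page_may_have_multiple_parents = true; /* never cleared */
	pthread_mutex_unlock(&vm->mmu_lock);
}

int main(void)
{
	struct vm_state vm = {
		.mmu_lock = PTHREAD_MUTEX_INITIALIZER,
		.shadow_page_may_have_multiple_parents = false,
	};

	on_alloc_indirect_root(&vm);	/* e.g. first shadow-paging root */
	return vm.shadow_page_may_have_multiple_parents ? 0 : 1;
}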