
[RFC,26/28] kvm: mmu: Integrate direct MMU with nesting

Message ID: 20190926231824.149014-27-bgardon@google.com
State: New, archived
Series: kvm: mmu: Rework the x86 TDP direct mapped case

Commit Message

Ben Gardon Sept. 26, 2019, 11:18 p.m. UTC
Allow the existing nesting implementation to interoperate with the
direct MMU: write-protect direct MMU mappings when a gfn is
write-protected for shadow paging, and handle direct MMU roots in the
fast CR3 switch path, where there is no struct kvm_mmu_page and thus
no role or write flooding count to consult.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu.c | 51 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 45 insertions(+), 6 deletions(-)

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a0c5271ae2381..e0f35da0d1027 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2742,6 +2742,29 @@  void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
+static bool rmap_write_protect_direct_gfn(struct kvm *kvm,
+					  struct kvm_memory_slot *slot,
+					  gfn_t gfn)
+{
+	struct direct_walk_iterator iter;
+	u64 new_pte;
+
+	direct_walk_iterator_setup_walk(&iter, kvm, slot->as_id, gfn, gfn + 1,
+					MMU_WRITE_LOCK);
+	while (direct_walk_iterator_next_present_leaf_pte(&iter)) {
+		if (!is_writable_pte(iter.old_pte) &&
+		    !spte_can_locklessly_be_made_writable(iter.old_pte))
+			break;
+
+		new_pte = iter.old_pte &
+			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
+
+		if (!direct_walk_iterator_set_pte(&iter, new_pte))
+			continue;
+	}
+	return direct_walk_iterator_end_traversal(&iter);
+}
+
 /**
  * kvm_arch_write_log_dirty - emulate dirty page logging
  * @vcpu: Guest mode vcpu
@@ -2764,6 +2787,10 @@  bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	int i;
 	bool write_protected = false;
 
+	if (kvm->arch.direct_mmu_enabled)
+		write_protected |= rmap_write_protect_direct_gfn(kvm, slot,
+								 gfn);
+
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
@@ -5755,6 +5782,8 @@  static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	uint i;
 	struct kvm_mmu_root_info root;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
+	bool direct_mmu_root = (vcpu->kvm->arch.direct_mmu_enabled &&
+				new_role.direct);
 
 	root.cr3 = mmu->root_cr3;
 	root.hpa = mmu->root_hpa;
@@ -5762,10 +5791,14 @@  static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		swap(root, mmu->prev_roots[i]);
 
-		if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) &&
-		    page_header(root.hpa) != NULL &&
-		    new_role.word == page_header(root.hpa)->role.word)
-			break;
+		if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa)) {
+			BUG_ON(direct_mmu_root &&
+				!is_direct_mmu_root(vcpu->kvm, root.hpa));
+
+			if (direct_mmu_root || (page_header(root.hpa) != NULL &&
+			    new_role.word == page_header(root.hpa)->role.word))
+				break;
+		}
 	}
 
 	mmu->root_hpa = root.hpa;
@@ -5813,8 +5846,14 @@  static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			 */
 			vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 
-			__clear_sp_write_flooding_count(
-				page_header(mmu->root_hpa));
+			/*
+			 * If this is a direct MMU root page, it doesn't have a
+			 * write flooding count.
+			 */
+			if (!(vcpu->kvm->arch.direct_mmu_enabled &&
+			      new_role.direct))
+				__clear_sp_write_flooding_count(
+						page_header(mmu->root_hpa));
 
 			return true;
 		}
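
To make the effect of the new rmap_write_protect_direct_gfn() helper
concrete, below is a minimal stand-alone sketch of the same
write-protection loop over leaf PTEs. The bit positions, the
iterator-free loop, and every name in it are simplified assumptions
made for illustration only; they are not KVM's direct_walk_iterator
API or the real SPTE layout.

/*
 * Toy model of the write-protection loop added by this patch.  All
 * names and bit positions below are hypothetical stand-ins, not KVM's
 * actual definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK	(1ULL << 1)	/* assumed hardware-writable bit */
#define SPTE_MMU_WRITEABLE	(1ULL << 61)	/* assumed software-writable bit */

/*
 * Walk a range of leaf PTEs and clear both writable bits, mirroring
 * what rmap_write_protect_direct_gfn() does for the PTEs mapping one
 * gfn.  Returns true if any PTE was changed, i.e. if something was
 * actually write-protected.
 */
static bool toy_write_protect_range(uint64_t *ptes, size_t count)
{
	bool write_protected = false;
	size_t i;

	for (i = 0; i < count; i++) {
		uint64_t old_pte = ptes[i];

		if (!old_pte)		/* non-present, nothing to do */
			continue;
		if (!(old_pte & (PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE)))
			continue;	/* already read-only */

		ptes[i] = old_pte & ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
		write_protected = true;
	}
	return write_protected;
}

int main(void)
{
	uint64_t ptes[3] = {
		0xabc000 | PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE,
		0xdef000,	/* present but already read-only */
		0,		/* non-present */
	};

	printf("write protected: %s\n",
	       toy_write_protect_range(ptes, 3) ? "yes" : "no");
	return 0;
}

In this model the return value plays the role that
direct_walk_iterator_end_traversal() plays in the patch: it is OR'ed
into write_protected by kvm_mmu_slot_gfn_write_protect() so callers
still learn whether the gfn needed any write protection.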