
[05/10] kvm: x86: mmu: Remove extra TLB flush from vmx_slot_enable_log_dirty()

Message ID 20181020031543.124399-6-junaids@google.com (mailing list archive)
State New, archived
Series: [01/10] kvm: mmu: spte_write_protect optimization

Commit Message

Junaid Shahid Oct. 20, 2018, 3:15 a.m. UTC
vmx_slot_enable_log_dirty() can currently invoke two remote TLB flushes, one
from clearing the D bits and one from write protecting large pages. Instead,
add a wrapper that does both and then issues a single remote TLB flush if
either operation requires it.
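
For reference, the combined helper introduced by this patch amounts to the
following (a condensed view of the mmu.c hunk below; both existing functions
are changed to return whether a flush is pending instead of flushing
themselves):

	void kvm_mmu_slot_wrprot_lpage_and_clear_dirty(struct kvm *kvm,
						       struct kvm_memory_slot *memslot)
	{
		bool flush;

		/* Each callee now reports whether any SPTEs were modified. */
		flush = kvm_mmu_slot_leaf_clear_dirty(kvm, memslot);
		flush |= kvm_mmu_slot_largepage_remove_write_access(kvm, memslot);

		/* A single remote TLB flush covers both operations. */
		if (flush)
			kvm_flush_remote_tlbs(kvm);
	}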

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/x86/include/asm/kvm_host.h |  6 ++++--
 arch/x86/kvm/mmu.c              | 21 ++++++++++++++++-----
 arch/x86/kvm/vmx.c              |  3 +--
 3 files changed, 21 insertions(+), 9 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 796a44d100c1..78187944494a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1236,10 +1236,12 @@  void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 				      struct kvm_memory_slot *memslot);
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				   const struct kvm_memory_slot *memslot);
-void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+bool kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot);
-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+bool kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 					struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_wrprot_lpage_and_clear_dirty(struct kvm *kvm,
+					       struct kvm_memory_slot *memslot);
 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 			    struct kvm_memory_slot *memslot);
 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3162cdd6698c..88d3ac0dae9e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5747,7 +5747,7 @@  void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 }
 
-void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+bool kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot)
 {
 	bool flush;
@@ -5765,12 +5765,12 @@  void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 	 * out of mmu lock also guarantees no dirty pages will be lost in
 	 * dirty_bitmap.
 	 */
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
+
+	return flush;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 
-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+bool kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 					struct kvm_memory_slot *memslot)
 {
 	bool flush;
@@ -5783,10 +5783,21 @@  void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 	/* see kvm_mmu_slot_remove_write_access */
 	lockdep_assert_held(&kvm->slots_lock);
 
+	return flush;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+
+void kvm_mmu_slot_wrprot_lpage_and_clear_dirty(struct kvm *kvm,
+					       struct kvm_memory_slot *memslot)
+{
+	bool flush;
+
+	flush = kvm_mmu_slot_leaf_clear_dirty(kvm, memslot);
+	flush |= kvm_mmu_slot_largepage_remove_write_access(kvm, memslot);
 	if (flush)
 		kvm_flush_remote_tlbs(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_wrprot_lpage_and_clear_dirty);
 
 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 			    struct kvm_memory_slot *memslot)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bb7696056072..d9be8e631f17 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -14380,8 +14380,7 @@  static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_slot_enable_log_dirty(struct kvm *kvm,
 				     struct kvm_memory_slot *slot)
 {
-	kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
-	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
+	kvm_mmu_slot_wrprot_lpage_and_clear_dirty(kvm, slot);
 }
 
 static void vmx_slot_disable_log_dirty(struct kvm *kvm,