[02/10] kvm: mmu: Allow reading hardware dirty state from kvm_get_dirty_log_protect

Message ID 20181020031543.124399-3-junaids@google.com (mailing list archive)
State New, archived
Series [01/10] kvm: mmu: spte_write_protect optimization

Commit Message

Junaid Shahid Oct. 20, 2018, 3:15 a.m. UTC
kvm_get_dirty_log_protect() currently gets the dirty state from the dirty
bitmap only, so any dirty state maintained by hardware has to be flushed
to the dirty bitmap beforehand. Add the ability to read such hardware
dirty state directly from within kvm_get_dirty_log_protect().
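
Concretely, the arch hook's mask argument becomes an in/out parameter. A
sketch of the prototype change (the full hunks are below):

/* Before: @mask is input-only; any hardware dirty state must already
 * have been flushed into the dirty bitmap. */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);

/* After: @mask is in/out; the arch may OR hardware-tracked dirty bits
 * into it before the common code reports them to userspace. */
void kvm_arch_mmu_get_and_reset_log_dirty(struct kvm *kvm,
					  struct kvm_memory_slot *slot,
					  gfn_t gfn_offset,
					  unsigned long *mask);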

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/mips/kvm/mmu.c             | 16 +++++++++-------
 arch/x86/include/asm/kvm_host.h | 13 ++++++++-----
 arch/x86/kvm/mmu.c              | 30 ++++++++++++++++++++----------
 arch/x86/kvm/vmx.c              |  8 ++++----
 include/linux/kvm_host.h        |  8 ++++----
 virt/kvm/arm/mmu.c              |  9 +++++----
 virt/kvm/kvm_main.c             | 21 +++++++--------------
 7 files changed, 57 insertions(+), 48 deletions(-)
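
For illustration only (not part of this patch): under the new convention, a
hypothetical arch callback could merge hardware-tracked dirty bits into the
mask before re-arming tracking. hw_read_and_clear_dirty() is a made-up
helper standing in for whatever hardware harvesting the arch provides.

/* Hypothetical arch implementation of the new callback (sketch only). */
static void example_get_and_reset_log_dirty(struct kvm *kvm,
					    struct kvm_memory_slot *slot,
					    gfn_t offset, unsigned long *mask)
{
	/* Fold in pages the hardware marked dirty since the last harvest
	 * (made-up helper; e.g. a PML-style buffer drain). */
	*mask |= hw_read_and_clear_dirty(kvm, slot, offset);

	/* Re-arm dirty tracking for every page now reported in @mask. */
	kvm_mmu_clear_dirty_pt_masked(kvm, slot, offset, *mask);
}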

Patch

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index d8dcdb350405..561279db5374 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -428,7 +428,7 @@  int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
 }
 
 /**
- * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
+ * kvm_arch_mmu_get_and_reset_log_dirty() - write protect dirty pages
  * @kvm:	The KVM pointer
  * @slot:	The memory slot associated with mask
  * @gfn_offset:	The gfn offset in memory slot
@@ -438,15 +438,17 @@  int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
  * Walks bits set in mask write protects the associated pte's. Caller must
  * acquire @kvm->mmu_lock.
  */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+void kvm_arch_mmu_get_and_reset_log_dirty(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
-		gfn_t gfn_offset, unsigned long mask)
+		gfn_t gfn_offset, unsigned long *mask)
 {
-	gfn_t base_gfn = slot->base_gfn + gfn_offset;
-	gfn_t start = base_gfn +  __ffs(mask);
-	gfn_t end = base_gfn + __fls(mask);
+	if (*mask != 0) {
+		gfn_t base_gfn = slot->base_gfn + gfn_offset;
+		gfn_t start = base_gfn +  __ffs(*mask);
+		gfn_t end = base_gfn + __fls(*mask);
 
-	kvm_mips_mkclean_gpa_pt(kvm, start, end);
+		kvm_mips_mkclean_gpa_pt(kvm, start, end);
+	}
 }
 
 /*
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55e51ff7e421..796a44d100c1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1123,18 +1123,21 @@  struct kvm_x86_ops {
 	 *	also called when slot is created with log dirty disabled.
 	 *  - flush_log_dirty:
 	 *	called before reporting dirty_bitmap to userspace.
-	 *  - enable_log_dirty_pt_masked:
+	 *  - get_and_reset_log_dirty:
 	 *	called when reenabling log dirty for the GFNs in the mask after
-	 *	corresponding bits are cleared in slot->dirty_bitmap.
+	 *      corresponding bits are cleared in slot->dirty_bitmap. This
+	 *      function can also add any un-flushed dirty state maintained by
+	 *      the hardware to the mask (e.g. if flush_log_dirty is not
+	 *      implemented).
 	 */
 	void (*slot_enable_log_dirty)(struct kvm *kvm,
 				      struct kvm_memory_slot *slot);
 	void (*slot_disable_log_dirty)(struct kvm *kvm,
 				       struct kvm_memory_slot *slot);
 	void (*flush_log_dirty)(struct kvm *kvm);
-	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
-					   struct kvm_memory_slot *slot,
-					   gfn_t offset, unsigned long mask);
+	void (*get_and_reset_log_dirty)(struct kvm *kvm,
+					struct kvm_memory_slot *slot,
+					gfn_t offset, unsigned long *mask);
 	int (*write_log_dirty)(struct kvm_vcpu *vcpu);
 
 	/* pmu operations of sub-arch */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 189e21c77525..752508892b08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1641,24 +1641,34 @@  void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
 
 /**
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * PT level pages.
+ * kvm_arch_mmu_get_and_reset_log_dirty - get the hardware dirty state for
+ * selected PT level pages and reset the hardware to keep tracking them.
  *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
+ * @mask is initially set to the contents of the slot's dirty_bitmap for the
+ * pages starting at @gfn_offset. Any pages marked dirty in the hardware state
+ * should also be marked in @mask before this function returns.
+ *
+ * If the hardware dirty state has already been flushed to the slot's
+ * dirty_bitmap beforehand (e.g. through kvm_x86_ops->flush_log_dirty) then this
+ * function just needs to reset the hardware structures to keep tracking the
+ * pages.
+ *
+ * If the hardware does not maintain dirty state at all, then this function
+ * just write protects the selected pages to enable software-based dirty logging
+ * for them.
  *
  * Used when we do not need to care about huge page mappings: e.g. during dirty
  * logging we do not have any such mappings.
  */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+void kvm_arch_mmu_get_and_reset_log_dirty(struct kvm *kvm,
 				struct kvm_memory_slot *slot,
-				gfn_t gfn_offset, unsigned long mask)
+				gfn_t gfn_offset, unsigned long *mask)
 {
-	if (kvm_x86_ops->enable_log_dirty_pt_masked)
-		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
-				mask);
+	if (kvm_x86_ops->get_and_reset_log_dirty)
+		kvm_x86_ops->get_and_reset_log_dirty(kvm, slot, gfn_offset,
+						     mask);
 	else
-		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, *mask);
 }
 
 /**
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 641a65b30685..bb7696056072 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7991,7 +7991,7 @@  static __init int hardware_setup(void)
 		kvm_x86_ops->slot_enable_log_dirty = NULL;
 		kvm_x86_ops->slot_disable_log_dirty = NULL;
 		kvm_x86_ops->flush_log_dirty = NULL;
-		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
+		kvm_x86_ops->get_and_reset_log_dirty = NULL;
 	}
 
 	if (!cpu_has_vmx_preemption_timer())
@@ -14437,9 +14437,9 @@  static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
 
 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
 					   struct kvm_memory_slot *memslot,
-					   gfn_t offset, unsigned long mask)
+					   gfn_t offset, unsigned long *mask)
 {
-	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
+	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, *mask);
 }
 
 static void __pi_post_block(struct kvm_vcpu *vcpu)
@@ -15076,7 +15076,7 @@  static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
 	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
 	.flush_log_dirty = vmx_flush_log_dirty,
-	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+	.get_and_reset_log_dirty = vmx_enable_log_dirty_pt_masked,
 	.write_log_dirty = vmx_write_pml_buffer,
 
 	.pre_block = vmx_pre_block,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c926698040e0..710c3f5d50ba 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -755,10 +755,10 @@  int kvm_get_dirty_log(struct kvm *kvm,
 int kvm_get_dirty_log_protect(struct kvm *kvm,
 			struct kvm_dirty_log *log, bool *is_dirty);
 
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
-					struct kvm_memory_slot *slot,
-					gfn_t gfn_offset,
-					unsigned long mask);
+void kvm_arch_mmu_get_and_reset_log_dirty(struct kvm *kvm,
+					  struct kvm_memory_slot *slot,
+					  gfn_t gfn_offset,
+					  unsigned long *mask);
 
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				struct kvm_dirty_log *log);
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index c23a1b323aad..11a8aa7a26b4 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1434,17 +1434,18 @@  static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 }
 
 /*
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * kvm_arch_mmu_get_and_reset_log_dirty - enable dirty logging for selected
  * dirty pages.
  *
  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
  * enable dirty logging for them.
  */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+void kvm_arch_mmu_get_and_reset_log_dirty(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
-		gfn_t gfn_offset, unsigned long mask)
+		gfn_t gfn_offset, unsigned long *mask)
 {
-	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+	if (*mask != 0)
+		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, *mask);
 }
 
 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 786ade1843a2..565563710687 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1179,27 +1179,20 @@  int kvm_get_dirty_log_protect(struct kvm *kvm,
 	n = kvm_dirty_bitmap_bytes(memslot);
 
 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
-	memset(dirty_bitmap_buffer, 0, n);
 
 	spin_lock(&kvm->mmu_lock);
 	*is_dirty = false;
 	for (i = 0; i < n / sizeof(long); i++) {
-		unsigned long mask;
-		gfn_t offset;
+		unsigned long mask = 0;
+		gfn_t offset = i * BITS_PER_LONG;
 
-		if (!dirty_bitmap[i])
-			continue;
+		if (dirty_bitmap[i])
+			mask = xchg(&dirty_bitmap[i], 0);
 
-		*is_dirty = true;
-
-		mask = xchg(&dirty_bitmap[i], 0);
+		kvm_arch_mmu_get_and_reset_log_dirty(kvm, memslot, offset,
+						     &mask);
+		*is_dirty |= mask != 0;
 		dirty_bitmap_buffer[i] = mask;
-
-		if (mask) {
-			offset = i * BITS_PER_LONG;
-			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
-								offset, mask);
-		}
 	}
 
 	spin_unlock(&kvm->mmu_lock);
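
For context, the userspace contract is unchanged: this path is still driven
by the existing KVM_GET_DIRTY_LOG ioctl. A minimal usage sketch (the helper
name is illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch the dirty bitmap for one memslot. @bitmap must be a
 * caller-allocated buffer large enough for the slot. */
static int get_dirty_log(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log;

	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;

	/* Returns 0 on success; with this series the bitmap reflects both
	 * software- and hardware-tracked dirty pages. */
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}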