diff mbox series

[3/8] KVM: arm64: use page tracking interface to enable dirty logging

Message ID 20240918152807.25135-4-lilitj@amazon.com (mailing list archive)
State New
Headers show
Series *** RFC: ARM KVM dirty tracking device *** | expand

Commit Message

Lilit Janpoladyan Sept. 18, 2024, 3:28 p.m. UTC
If a page tracking device is available, use it to enable and disable
hardware dirty tracking. Allocate a tracking context on the first dirty
logging enablement (for the first memslot) and deallocate the context
when dirty logging is off for the VMID.

Allocation and use of the context are not synchronized, as both are
done from the VM ioctls.

Signed-off-by: Lilit Janpoladyan <lilitj@amazon.com>
---
 arch/arm64/include/asm/kvm_host.h |  5 +++
 arch/arm64/kvm/arm.c              | 54 +++++++++++++++++++++++++++++++
 arch/mips/kvm/mips.c              | 10 ++++++
 arch/powerpc/kvm/book3s.c         | 10 ++++++
 arch/powerpc/kvm/booke.c          | 10 ++++++
 arch/s390/kvm/kvm-s390.c          | 10 ++++++
 arch/x86/kvm/x86.c                | 10 ++++++
 include/linux/kvm_host.h          |  2 ++
 virt/kvm/kvm_main.c               | 19 +++++++----
 9 files changed, 123 insertions(+), 7 deletions(-)
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5b5e3647fbda..db9bf42123e1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -377,6 +377,11 @@  struct kvm_arch {
 	 * the associated pKVM instance in the hypervisor.
 	 */
 	struct kvm_protected_vm pkvm;
+
+	/*
+	 * Stores page tracking context if page tracking device is in use
+	 */
+	void *page_tracking_ctx;
 };
 
 struct kvm_vcpu_fault_info {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index aea56df8ac04..c8dcf719ee99 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -267,6 +267,9 @@  static void kvm_destroy_mpidr_data(struct kvm *kvm)
  */
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+	if (kvm->arch.page_tracking_ctx)
+		page_tracking_release(kvm->arch.page_tracking_ctx);
+
 	bitmap_free(kvm->arch.pmu_filter);
 	free_cpumask_var(kvm->arch.supported_cpus);
 
@@ -1816,6 +1819,57 @@  long kvm_arch_vcpu_ioctl(struct file *filp,
 	return r;
 }
 
+int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	void *ctx = NULL;
+	struct pt_config config = {};
+	int r;
+
+	if (!page_tracking_device_registered())
+		return 0;
+
+	if (!kvm->arch.page_tracking_ctx) {
+		config.vmid = (u32)kvm->arch.mmu.vmid.id.counter;
+		config.mode = dirty_pages;
+		ctx = page_tracking_allocate(config);
+		if (!ctx)
+			return -ENOENT;
+
+		kvm->arch.page_tracking_ctx = ctx;
+	}
+
+	r = page_tracking_enable(kvm->arch.page_tracking_ctx, -1);
+
+	if (r) {
+		if (ctx) {
+			page_tracking_release(ctx);
+			kvm->arch.page_tracking_ctx = NULL;
+		}
+	}
+	return r;
+}
+
+int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	int r = 0;
+
+	if (!page_tracking_device_registered())
+		return 0;
+
+	if (!kvm->arch.page_tracking_ctx)
+		return -ENOENT;
+
+	r = page_tracking_disable(kvm->arch.page_tracking_ctx, -1);
+
+	if (r == -EBUSY) {
+		r = 0;
+	} else {
+		page_tracking_release(kvm->arch.page_tracking_ctx);
+		kvm->arch.page_tracking_ctx = NULL;
+	}
+	return r;
+}
+
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index b5de770b092e..edc6f473af4e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -974,6 +974,16 @@  long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 	return r;
 }
 
+int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
+int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index ff6c38373957..4c4a3ecc301c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -844,6 +844,16 @@  int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
 }
 
+int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
+int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6a5be025a8af..f263ebc8fa49 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1814,6 +1814,16 @@  int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	return r;
 }
 
+int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
+int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0fd96860fc45..d6a8f7dbc644 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -667,6 +667,16 @@  int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	return r;
 }
 
+int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
+int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 	int i;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c983c8e434b8..1be8bacfe2bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6488,6 +6488,16 @@  static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 	return 0;
 }
 
+int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
+int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot)
+{
+	return 0;
+}
+
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0d5125a3e31a..ae905f54ec47 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1475,6 +1475,8 @@  void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
 					gfn_t gfn_offset,
 					unsigned long mask);
+int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot);
+int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot);
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
 
 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cb2b78e92910..1fd5e234c188 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1689,11 +1689,12 @@  static int kvm_prepare_memory_region(struct kvm *kvm,
 	return r;
 }
 
-static void kvm_commit_memory_region(struct kvm *kvm,
-				     struct kvm_memory_slot *old,
-				     const struct kvm_memory_slot *new,
-				     enum kvm_mr_change change)
+static int kvm_commit_memory_region(struct kvm *kvm,
+				    struct kvm_memory_slot *old,
+				    const struct kvm_memory_slot *new,
+				    enum kvm_mr_change change)
 {
+	int r = 0;
 	int old_flags = old ? old->flags : 0;
 	int new_flags = new ? new->flags : 0;
 	/*
@@ -1709,6 +1710,10 @@  static void kvm_commit_memory_region(struct kvm *kvm,
 		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
 		atomic_set(&kvm->nr_memslots_dirty_logging,
 			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
+		if (change > 0)
+			r = kvm_arch_enable_dirty_logging(kvm, new);
+		else
+			r = kvm_arch_disable_dirty_logging(kvm, new);
 	}
 
 	kvm_arch_commit_memory_region(kvm, old, new, change);
@@ -1740,6 +1745,8 @@  static void kvm_commit_memory_region(struct kvm *kvm,
 	default:
 		BUG();
 	}
+
+	return r;
 }
 
 /*
@@ -1954,9 +1961,7 @@  static int kvm_set_memslot(struct kvm *kvm,
 	 * will directly hit the final, active memslot.  Architectures are
 	 * responsible for knowing that new->arch may be stale.
 	 */
-	kvm_commit_memory_region(kvm, old, new, change);
-
-	return 0;
+	return kvm_commit_memory_region(kvm, old, new, change);
 }
 
 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,