[v5,2/7] KVM: Add KVM_PRE_FAULT_MEMORY vcpu ioctl to pre-populate guest memory

Message ID 20240710174031.312055-3-pbonzini@redhat.com (mailing list archive)
State New, archived
Series KVM: Guest Memory Pre-Population API

Commit Message

Paolo Bonzini July 10, 2024, 5:40 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

Add a new ioctl, KVM_PRE_FAULT_MEMORY, to the KVM common code.  It iterates over
the memory range and calls the arch-specific function.  The implementation is
optional and enabled by a Kconfig symbol.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Reviewed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Message-ID: <819322b8f25971f2b9933bfa4506e618508ad782.1712785629.git.isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 include/linux/kvm_host.h |  5 ++++
 include/uapi/linux/kvm.h | 10 +++++++
 virt/kvm/Kconfig         |  3 ++
 virt/kvm/kvm_main.c      | 60 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 78 insertions(+)
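
A minimal userspace sketch of how the new ioctl could be driven (not part of
the patch; it assumes a kernel and headers with this series applied, "vcpu_fd"
is the fd returned by KVM_CREATE_VCPU, and "gpa"/"len" are caller-supplied,
page-aligned values):

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pre_fault_range(int vcpu_fd, __u64 gpa, __u64 len)
{
	struct kvm_pre_fault_memory range;

	memset(&range, 0, sizeof(range));
	range.gpa = gpa;	/* must be page-aligned */
	range.size = len;	/* must be page-aligned and non-zero */

	/*
	 * KVM passes the leftover range back through the struct, so keep
	 * retrying until everything is mapped or a hard error is hit.
	 */
	while (range.size) {
		if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0) {
			if (errno == EINTR)
				continue;	/* interrupted by a signal */
			return -errno;
		}
	}
	return 0;
}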

Comments

Anish Moorthy July 10, 2024, 8:46 p.m. UTC | #1
On Wed, Jul 10, 2024 at 10:41 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> +       if (!PAGE_ALIGNED(range->gpa) ||
> +           !PAGE_ALIGNED(range->size) ||
> ...
> +               return -EINVAL;

If 'gpa' and 'size' must be page-aligned anyways, doesn't it make
sense to just take a 'gfn' and 'num_pages'  and eliminate this error
condition?
Sean Christopherson July 10, 2024, 9:56 p.m. UTC | #2
On Wed, Jul 10, 2024, Anish Moorthy wrote:
> On Wed, Jul 10, 2024 at 10:41 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
> >
> > +       if (!PAGE_ALIGNED(range->gpa) ||
> > +           !PAGE_ALIGNED(range->size) ||
> > ...
> > +               return -EINVAL;
> 
> If 'gpa' and 'size' must be page-aligned anyways, doesn't it make
> sense to just take a 'gfn' and 'num_pages'  and eliminate this error
> condition?

The downside is that taking gfn+num_pages prevents supporting sub-page pre-faulting
in the future.  I highly doubt that sub-page mappings will ever be a thing in KVM,
but two PAGE_ALIGNED() checks are super cheap, so it's sort of a "why not?" scenario.
Paolo Bonzini July 10, 2024, 10:04 p.m. UTC | #3
On Wed, Jul 10, 2024 at 11:56 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Wed, Jul 10, 2024, Anish Moorthy wrote:
> > On Wed, Jul 10, 2024 at 10:41 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
> > >
> > > +       if (!PAGE_ALIGNED(range->gpa) ||
> > > +           !PAGE_ALIGNED(range->size) ||
> > > ...
> > > +               return -EINVAL;
> >
> > If 'gpa' and 'size' must be page-aligned anyways, doesn't it make
> > sense to just take a 'gfn' and 'num_pages'  and eliminate this error
> > condition?
>
> The downside is that taking gfn+num_pages prevents supporting sub-page pre-faulting
> in the future.  I highly doubt that sub-page mappings will ever be a thing in KVM,
> but two PAGE_ALIGNED() checks are super cheap, so it's sort of a "why not?" scenario.

With ARM having multiple page sizes, and not necessarily the same size
in host and guest, using a gfn argument is also unnecessarily
confusing.

Paolo
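
To make the trade-off above concrete, here is the check as posted next to a
hypothetical gfn/num_pages variant (the latter is not part of the series; the
field names are made up for illustration):

	/* As posted: byte-granular gpa/size, sub-page ranges stay expressible. */
	if (!PAGE_ALIGNED(range->gpa) ||
	    !PAGE_ALIGNED(range->size) ||
	    range->gpa + range->size <= range->gpa)
		return -EINVAL;

	/*
	 * Hypothetical gfn/num_pages variant: the alignment checks go away,
	 * but the unit becomes the host page size, which is what gets
	 * confusing when the guest uses a different page size (e.g. arm64).
	 */
	if (!range->num_pages ||
	    range->gfn + range->num_pages < range->gfn)
		return -EINVAL;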

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7b57878c8c18..c3c922bf077f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2477,4 +2477,9 @@  long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
 #endif
 
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+				    struct kvm_pre_fault_memory *range);
+#endif
+
 #endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index d03842abae57..e5af8c692dc0 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -917,6 +917,7 @@  struct kvm_enable_cap {
 #define KVM_CAP_MEMORY_ATTRIBUTES 233
 #define KVM_CAP_GUEST_MEMFD 234
 #define KVM_CAP_VM_TYPES 235
+#define KVM_CAP_PRE_FAULT_MEMORY 236
 
 struct kvm_irq_routing_irqchip {
 	__u32 irqchip;
@@ -1548,4 +1549,13 @@  struct kvm_create_guest_memfd {
 	__u64 reserved[6];
 };
 
+#define KVM_PRE_FAULT_MEMORY	_IOWR(KVMIO, 0xd5, struct kvm_pre_fault_memory)
+
+struct kvm_pre_fault_memory {
+	__u64 gpa;
+	__u64 size;
+	__u64 flags;
+	__u64 padding[5];
+};
+
 #endif /* __LINUX_KVM_H */
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 754c6c923427..b14e14cdbfb9 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -67,6 +67,9 @@  config HAVE_KVM_INVALID_WAKEUPS
 config KVM_GENERIC_DIRTYLOG_READ_PROTECT
        bool
 
+config KVM_GENERIC_PRE_FAULT_MEMORY
+       bool
+
 config KVM_COMPAT
        def_bool y
        depends on KVM && COMPAT && !(S390 || ARM64 || RISCV)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8e422c2c9450..f817ec66c85f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4373,6 +4373,52 @@  static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
 	return fd;
 }
 
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+				     struct kvm_pre_fault_memory *range)
+{
+	int idx;
+	long r;
+	u64 full_size;
+
+	if (range->flags)
+		return -EINVAL;
+
+	if (!PAGE_ALIGNED(range->gpa) ||
+	    !PAGE_ALIGNED(range->size) ||
+	    range->gpa + range->size <= range->gpa)
+		return -EINVAL;
+
+	vcpu_load(vcpu);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	full_size = range->size;
+	do {
+		if (signal_pending(current)) {
+			r = -EINTR;
+			break;
+		}
+
+		r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
+		if (WARN_ON_ONCE(r == 0 || r == -EIO))
+			break;
+
+		if (r < 0)
+			break;
+
+		range->size -= r;
+		range->gpa += r;
+		cond_resched();
+	} while (range->size);
+
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	vcpu_put(vcpu);
+
+	/* Return success if at least one page was mapped successfully.  */
+	return full_size == range->size ? r : 0;
+}
+#endif
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -4573,6 +4619,20 @@  static long kvm_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
 		break;
 	}
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+	case KVM_PRE_FAULT_MEMORY: {
+		struct kvm_pre_fault_memory range;
+
+		r = -EFAULT;
+		if (copy_from_user(&range, argp, sizeof(range)))
+			break;
+		r = kvm_vcpu_pre_fault_memory(vcpu, &range);
+		/* Pass back leftover range. */
+		if (copy_to_user(argp, &range, sizeof(range)))
+			r = -EFAULT;
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
 	}
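
For reference, the calling contract the generic loop above expects from the
arch hook (a sketch, not an implementation from this series): the hook returns
the number of bytes it mapped starting at range->gpa, and the loop advances
gpa and shrinks size by that amount; a negative errno stops the loop, while 0
or -EIO is treated as a KVM bug via WARN_ON_ONCE().

long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
				    struct kvm_pre_fault_memory *range)
{
	/*
	 * Illustrative stub only: map at least the page containing
	 * range->gpa here, then report how many bytes were covered.
	 */
	return PAGE_SIZE;
}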