
[07/11] KVM: guest_memfd: extract __kvm_gmem_get_pfn()

Message ID 20240404185034.3184582-8-pbonzini@redhat.com (mailing list archive)
State New
Series KVM: guest_memfd: New hooks and functionality for SEV-SNP and TDX

Commit Message

Paolo Bonzini April 4, 2024, 6:50 p.m. UTC
In preparation for adding a function that walks a set of pages
provided by userspace and populates them in a guest_memfd,
add a version of kvm_gmem_get_pfn() that has a "bool prepare"
argument and passes it down to kvm_gmem_get_folio().

Populating guest memory has to call __kvm_gmem_get_pfn() repeatedly
on the same file, so make the new function take struct file*.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 virt/kvm/guest_memfd.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)
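
For context, the call pattern this commit prepares for looks roughly like the
sketch below. It is illustrative only, not series code: the real
kvm_gmem_populate() added later in the series has a different signature, and
the function name, the page-count parameter and the "copy into pfn" step are
placeholders. The point is that the file reference is taken once and
__kvm_gmem_get_pfn() is then called per page with prepare == false.

	/*
	 * Illustrative sketch: take the file reference once, then look up
	 * each page with prepare == false because the caller is about to
	 * populate it.  Dropping the page reference after copying is
	 * omitted for brevity.
	 */
	static int example_populate(struct kvm_memory_slot *slot,
				    gfn_t start_gfn, long npages)
	{
		struct file *file = kvm_gmem_get_file(slot);
		long i;
		int r = 0;

		if (!file)
			return -EFAULT;

		for (i = 0; i < npages; i++) {
			kvm_pfn_t pfn;
			int max_order;

			r = __kvm_gmem_get_pfn(file, slot, start_gfn + i, &pfn,
					       &max_order, false);
			if (r)
				break;
			/* ... copy or encrypt the source data into pfn ... */
		}

		fput(file);
		return r;
	}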

Comments

Michael Roth April 9, 2024, 11:35 p.m. UTC | #1
On Thu, Apr 04, 2024 at 02:50:29PM -0400, Paolo Bonzini wrote:
> In preparation for adding a function that walks a set of pages
> provided by userspace and populates them in a guest_memfd,
> add a version of kvm_gmem_get_pfn() that has a "bool prepare"
> argument and passes it down to kvm_gmem_get_folio().
> 
> Populating guest memory has to call __kvm_gmem_get_pfn() repeatedly
> on the same file, so make the new function take struct file*.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  virt/kvm/guest_memfd.c | 38 +++++++++++++++++++++++---------------
>  1 file changed, 23 insertions(+), 15 deletions(-)
> 
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index 486748e65f36..a537a7e63ab5 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -540,33 +540,29 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
>  	fput(file);
>  }
>  
> -int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> -		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
> +static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
> +		       gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
>  {
>  	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
> -	struct kvm_gmem *gmem;
> +	struct kvm_gmem *gmem = file->private_data;
>  	struct folio *folio;
>  	struct page *page;
> -	struct file *file;
>  	int r;
>  
> -	file = kvm_gmem_get_file(slot);
> -	if (!file)
> +	if (file != slot->gmem.file) {
> +		WARN_ON_ONCE(slot->gmem.file);
>  		return -EFAULT;
> +	}
>  
>  	gmem = file->private_data;
> -
>  	if (xa_load(&gmem->bindings, index) != slot) {
>  		WARN_ON_ONCE(xa_load(&gmem->bindings, index));
> -		r = -EIO;
> -		goto out_fput;
> +		return -EIO;
>  	}
>  
>  	folio = kvm_gmem_get_folio(file_inode(file), index, true);

This should be:

  folio = kvm_gmem_get_folio(file_inode(file), index, prepare);

Otherwise:

Reviewed-by: Michael Roth <michael.roth@amd.com>

-Mike

> -	if (!folio) {
> -		r = -ENOMEM;
> -		goto out_fput;
> -	}
> +	if (!folio)
> +		return -ENOMEM;
>  
>  	if (folio_test_hwpoison(folio)) {
>  		r = -EHWPOISON;
> @@ -583,9 +579,21 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
>  
>  out_unlock:
>  	folio_unlock(folio);
> -out_fput:
> -	fput(file);
>  
>  	return r;
>  }
> +
> +int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> +		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
> +{
> +	struct file *file = kvm_gmem_get_file(slot);
> +	int r;
> +
> +	if (!file)
> +		return -EFAULT;
> +
> +	r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true);
> +	fput(file);
> +	return r;
> +}
>  EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
> -- 
> 2.43.0
> 
>
Sean Christopherson April 24, 2024, 10:34 p.m. UTC | #2
On Thu, Apr 04, 2024, Paolo Bonzini wrote:
> In preparation for adding a function that walks a set of pages
> provided by userspace and populates them in a guest_memfd,
> add a version of kvm_gmem_get_pfn() that has a "bool prepare"
> argument and passes it down to kvm_gmem_get_folio().
> 
> Populating guest memory has to call __kvm_gmem_get_pfn() repeatedly
> on the same file, so make the new function take struct file*.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  virt/kvm/guest_memfd.c | 38 +++++++++++++++++++++++---------------
>  1 file changed, 23 insertions(+), 15 deletions(-)
> 
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index 486748e65f36..a537a7e63ab5 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -540,33 +540,29 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
>  	fput(file);
>  }
>  
> -int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> -		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
> +static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
> +		       gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)

I genuinely don't know what it means to "prepare" a guest_memfd.  I see it becomes

	if (!prepare)
		fgp_flags |= FGP_CREAT_ONLY;

but I find the name "prepare" to be extremely unhelpful.
Sean Christopherson April 24, 2024, 10:59 p.m. UTC | #3
On Wed, Apr 24, 2024, Sean Christopherson wrote:
> On Thu, Apr 04, 2024, Paolo Bonzini wrote:
> > In preparation for adding a function that walks a set of pages
> > provided by userspace and populates them in a guest_memfd,
> > add a version of kvm_gmem_get_pfn() that has a "bool prepare"
> > argument and passes it down to kvm_gmem_get_folio().
> > 
> > Populating guest memory has to call __kvm_gmem_get_pfn() repeatedly
> > on the same file, so make the new function take struct file*.
> > 
> > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> > ---
> >  virt/kvm/guest_memfd.c | 38 +++++++++++++++++++++++---------------
> >  1 file changed, 23 insertions(+), 15 deletions(-)
> > 
> > diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> > index 486748e65f36..a537a7e63ab5 100644
> > --- a/virt/kvm/guest_memfd.c
> > +++ b/virt/kvm/guest_memfd.c
> > @@ -540,33 +540,29 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
> >  	fput(file);
> >  }
> >  
> > -int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> > -		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
> > +static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
> > +		       gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
> 
> I genuinely don't know what it means to "prepare" a guest_memfd.  I see it becomes
> 
> 	if (!prepare)
> 		fgp_flags |= FGP_CREAT_ONLY;
> 
> but I find the name "prepare" to be extremely unhelpful.

Ah, I'm blind.  Maybe "do_prepare", or "do_arch_prepare"?  To make it clear that
it's a command, not a description of the operation (which is how I first read it).

And I feel like overloading it to also mean FGP_CREAT_ONLY when _not_ preparing
the memory is odd.

	if (prepare) {
		int r =	kvm_gmem_prepare_folio(inode, index, folio);
		if (r < 0) {
			folio_unlock(folio);
			folio_put(folio);
			return ERR_PTR(r);
		}
	}

Instead of "prepare" as a command, would it make sense to describe the "populating"
case?  Because I think it's more intuitive that populating _needs_ to operate on
new, unprepared data.
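
For illustration, one way to read that suggestion is to name the flag after the
populating case rather than issuing a "prepare" command. The sketch below is
hypothetical, not code from this series: the name "populating" is an
assumption, and error reporting is simplified to returning NULL to match the
callers in this patch; FGP_CREAT_ONLY and kvm_gmem_prepare_folio() are the
helpers quoted above.

	static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index,
						bool populating)
	{
		/* A folio that is about to be populated must be brand new. */
		fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
		struct folio *folio;

		if (populating)
			fgp_flags |= FGP_CREAT_ONLY;

		folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags,
					    mapping_gfp_mask(inode->i_mapping));
		if (IS_ERR(folio))
			return NULL;

		/* When not populating, e.g. on a fault, prepare the folio
		 * before handing it out.
		 */
		if (!populating) {
			int r = kvm_gmem_prepare_folio(inode, index, folio);

			if (r < 0) {
				folio_unlock(folio);
				folio_put(folio);
				return NULL;
			}
		}

		return folio;
	}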

Patch

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 486748e65f36..a537a7e63ab5 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -540,33 +540,29 @@  void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 	fput(file);
 }
 
-int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
+		       gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
 {
 	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
-	struct kvm_gmem *gmem;
+	struct kvm_gmem *gmem = file->private_data;
 	struct folio *folio;
 	struct page *page;
-	struct file *file;
 	int r;
 
-	file = kvm_gmem_get_file(slot);
-	if (!file)
+	if (file != slot->gmem.file) {
+		WARN_ON_ONCE(slot->gmem.file);
 		return -EFAULT;
+	}
 
 	gmem = file->private_data;
-
 	if (xa_load(&gmem->bindings, index) != slot) {
 		WARN_ON_ONCE(xa_load(&gmem->bindings, index));
-		r = -EIO;
-		goto out_fput;
+		return -EIO;
 	}
 
 	folio = kvm_gmem_get_folio(file_inode(file), index, true);
-	if (!folio) {
-		r = -ENOMEM;
-		goto out_fput;
-	}
+	if (!folio)
+		return -ENOMEM;
 
 	if (folio_test_hwpoison(folio)) {
 		r = -EHWPOISON;
@@ -583,9 +579,21 @@  int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 out_unlock:
 	folio_unlock(folio);
-out_fput:
-	fput(file);
 
 	return r;
 }
+
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+{
+	struct file *file = kvm_gmem_get_file(slot);
+	int r;
+
+	if (!file)
+		return -EFAULT;
+
+	r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true);
+	fput(file);
+	return r;
+}
 EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);