
[19/21] KVM: guest_memfd: add API to undo kvm_gmem_get_uninit_pfn

Message ID 20240227232100.478238-20-pbonzini@redhat.com (mailing list archive)
State New, archived
Series TDX/SNP part 1 of n, for 6.9

Commit Message

Paolo Bonzini Feb. 27, 2024, 11:20 p.m. UTC
In order to be able to redo kvm_gmem_get_uninit_pfn, a hole must be punched
into the filemap, thus allowing FGP_CREAT_ONLY to succeed again.  This will
be used whenever an operation that follows kvm_gmem_get_uninit_pfn fails.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 include/linux/kvm_host.h |  7 +++++++
 virt/kvm/guest_memfd.c   | 28 ++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)
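
For illustration, a hypothetical caller would pair the two APIs roughly as
follows (a sketch only; map_private_gfn() and gmem_prepare_for_guest() are
placeholder names for whatever operation follows the lookup):

	static int map_private_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
				   gfn_t gfn)
	{
		kvm_pfn_t pfn;
		int max_order, r;

		/* Grab an uninitialized page; fails if it already exists. */
		r = kvm_gmem_get_uninit_pfn(kvm, slot, gfn, &pfn, &max_order);
		if (r)
			return r;

		/* Placeholder for the follow-up operation (e.g. a firmware call). */
		r = gmem_prepare_for_guest(kvm, gfn, pfn);
		if (r)
			/* Punch the page back out so the lookup can be redone. */
			kvm_gmem_undo_get_pfn(kvm, slot, gfn, 0);

		return r;
	}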

Comments

Xu Yilun March 4, 2024, 4:44 a.m. UTC | #1
On Tue, Feb 27, 2024 at 06:20:58PM -0500, Paolo Bonzini wrote:
> In order to be able to redo kvm_gmem_get_uninit_pfn, a hole must be punched
> into the filemap, thus allowing FGP_CREAT_ONLY to succeed again.  This will
> be used whenever an operation that follows kvm_gmem_get_uninit_pfn fails.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  include/linux/kvm_host.h |  7 +++++++
>  virt/kvm/guest_memfd.c   | 28 ++++++++++++++++++++++++++++
>  2 files changed, 35 insertions(+)
> 
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 03bf616b7308..192c58116220 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -2436,6 +2436,8 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
>  		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
>  int kvm_gmem_get_uninit_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
>  		            gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
> +int kvm_gmem_undo_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> +			  gfn_t gfn, int order);
>  #else
>  static inline int kvm_gmem_get_pfn(struct kvm *kvm,
>  				   struct kvm_memory_slot *slot, gfn_t gfn,
> @@ -2452,6 +2454,11 @@ static inline int kvm_gmem_get_uninit_pfn(struct kvm *kvm,
>  	KVM_BUG_ON(1, kvm);
>  	return -EIO;
>  }
> +
> +static inline int kvm_gmem_undo_get_pfn(struct kvm *kvm,
> +				        struct kvm_memory_slot *slot, gfn_t gfn,
> +				        int order)
> +{}

return -EIO;

or the compiler will complain that there is no return value.
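
For reference, the fixed stub would presumably mirror the other
!CONFIG_KVM_PRIVATE_MEM stubs above:

	static inline int kvm_gmem_undo_get_pfn(struct kvm *kvm,
						struct kvm_memory_slot *slot,
						gfn_t gfn, int order)
	{
		KVM_BUG_ON(1, kvm);
		return -EIO;
	}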

>  #endif /* CONFIG_KVM_PRIVATE_MEM */
>  
>  #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index 7ec7afafc960..535ef1aa34fb 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -590,3 +590,31 @@ int kvm_gmem_get_uninit_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
>  	return __kvm_gmem_get_pfn(kvm, slot, gfn, pfn, max_order, false);
>  }
>  EXPORT_SYMBOL_GPL(kvm_gmem_get_uninit_pfn);
> +
> +int kvm_gmem_undo_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> +		          gfn_t gfn, int order)

I haven't seen the caller yet, but do we need to ensure the gfn is
aligned to the page order?  E.g.:

	WARN_ON(gfn & ((1UL << order) - 1));

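Or, if a misaligned gfn should be rejected rather than just flagged,
something along these lines at the top of the function (just a sketch,
the exact behaviour is up to you):

	if (WARN_ON_ONCE(gfn & ((1UL << order) - 1)))
		return -EINVAL;
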
> +{
> +	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
> +	struct kvm_gmem *gmem;
> +	struct file *file;
> +	int r;
> +
> +	file = kvm_gmem_get_file(slot);
> +	if (!file)
> +		return -EFAULT;
> +
> +	gmem = file->private_data;
> +
> +	if (WARN_ON_ONCE(xa_load(&gmem->bindings, index) != slot)) {
> +		r = -EIO;
> +		goto out_fput;
> +	}
> +
> +	r = kvm_gmem_punch_hole(file_inode(file), index << PAGE_SHIFT, PAGE_SHIFT << order);
                                                                       ^
This should be PAGE_SIZE << order; PAGE_SHIFT << order computes the wrong
hole length.
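i.e. the corrected call would presumably read:

	r = kvm_gmem_punch_hole(file_inode(file), index << PAGE_SHIFT,
				PAGE_SIZE << order);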

Thanks,
Yilun

> +
> +out_fput:
> +	fput(file);
> +
> +	return r;
> +}
> +EXPORT_SYMBOL_GPL(kvm_gmem_undo_get_pfn);
> -- 
> 2.39.0
> 
> 
>

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 03bf616b7308..192c58116220 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2436,6 +2436,8 @@  int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
 int kvm_gmem_get_uninit_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 		            gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+int kvm_gmem_undo_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+			  gfn_t gfn, int order);
 #else
 static inline int kvm_gmem_get_pfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn,
@@ -2452,6 +2454,11 @@  static inline int kvm_gmem_get_uninit_pfn(struct kvm *kvm,
 	KVM_BUG_ON(1, kvm);
 	return -EIO;
 }
+
+static inline int kvm_gmem_undo_get_pfn(struct kvm *kvm,
+				        struct kvm_memory_slot *slot, gfn_t gfn,
+				        int order)
+{}
 #endif /* CONFIG_KVM_PRIVATE_MEM */
 
 #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 7ec7afafc960..535ef1aa34fb 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -590,3 +590,31 @@  int kvm_gmem_get_uninit_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	return __kvm_gmem_get_pfn(kvm, slot, gfn, pfn, max_order, false);
 }
 EXPORT_SYMBOL_GPL(kvm_gmem_get_uninit_pfn);
+
+int kvm_gmem_undo_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+		          gfn_t gfn, int order)
+{
+	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
+	struct kvm_gmem *gmem;
+	struct file *file;
+	int r;
+
+	file = kvm_gmem_get_file(slot);
+	if (!file)
+		return -EFAULT;
+
+	gmem = file->private_data;
+
+	if (WARN_ON_ONCE(xa_load(&gmem->bindings, index) != slot)) {
+		r = -EIO;
+		goto out_fput;
+	}
+
+	r = kvm_gmem_punch_hole(file_inode(file), index << PAGE_SHIFT, PAGE_SHIFT << order);
+
+out_fput:
+	fput(file);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(kvm_gmem_undo_get_pfn);