KVM: guest_memfd: Refactor kvm_gmem into inode->i_private

Message ID 8e57c347d6c461431e84ef4354dc076f363f3c01.1695751312.git.isaku.yamahata@intel.com
State New, archived
Series KVM: guest_memfd: Refactor kvm_gmem into inode->i_private

Commit Message

Isaku Yamahata Sept. 26, 2023, 6:03 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

Refactor guest_memfd to use inode->i_private to store info about kvm_gmem.
Currently the information is stored in the following way:
- flags in inode->i_private
- struct kvm_gmem in file->private_data
- struct kvm_gmem in a linked list in inode->i_mapping->private_list,
  and this list has a single entry.

The relationship between struct file, struct inode and struct kvm_gmem is
1:1, not 1:many. Consolidate related info in one place.
- Move flags into struct kvm_gmem
- Store struct kvm_gmem in inode->i_private
- Don't use file->private_data
- Don't use inode->i_mapping->private_list
- Introduce a helper to convert from an inode to its kvm_gmem (see the
  sketch below)
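
For illustration, here is a minimal sketch of the before/after layout (all
names are taken from the diff below, nothing new):

	/*
	 * Before:
	 *   inode->i_private   = (void *)(unsigned long)flags;
	 *   file->private_data = gmem;
	 *   list_add(&gmem->entry, &inode->i_mapping->private_list);
	 *
	 * After: everything hangs off a single pointer.
	 *   gmem->flags      = flags;
	 *   inode->i_private = gmem;
	 *
	 * Lookup then becomes a trivial conversion:
	 */
	static struct kvm_gmem *to_gmem(struct inode *inode)
	{
		return inode->i_private;
	}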

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
 virt/kvm/guest_memfd.c | 53 ++++++++++++++++++------------------------
 1 file changed, 23 insertions(+), 30 deletions(-)

Comments

Sean Christopherson Sept. 26, 2023, 7:31 p.m. UTC | #1
On Tue, Sep 26, 2023, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
> 
> Refactor guest_memfd to use inode->i_private to store info about kvm_gmem.

Why!?!?!?  Sadly, I don't have telepathic superpowers.  This changelog only
explains *what* the code is doing, what I need to know is *why* you want to make
this change.

The below kinda sorta suggests that this is to simplify the code, but it's not
at all obvious that that's the actual motivation, or the only motivation.

> Currently the information is stored in the following way:
> - flags in inode->i_private
> - struct kvm_gmem in file->private_data
> - struct kvm_gmem in a linked list in inode->i_mapping->private_list,
>   and this list has a single entry.
> 
> The relationship between struct file, struct inode and struct kvm_gmem is
> 1:1, not 1:many. 

No, it's not.  Or at least it won't be in the future.  As I explained before[1]:

 : The code is structured to allow for multiple gmem instances per inode.  This isn't
 : actually possible in the initial code base, but it's on the horizon[*].  I included
 : the list-based infrastructure in this initial series to ensure that guest_memfd
 : can actually support multiple files per inode, and to minimize the churn when the
 : "link" support comes along.

 : [*] https://lore.kernel.org/all/cover.1691446946.git.ackerleytng@google.com

[1] https://lore.kernel.org/all/ZQsAiGuw%2F38jIOV7@google.com
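
For context, a rough sketch (not code from this series; the "link" flow is
hypothetical) of what the list-based layout allows once two files share one
inode:

	/*
	 * Each file carries its own kvm_gmem (its own VM and bindings), and
	 * every instance bound to the same inode is reachable via the mapping:
	 *
	 *   fileA->private_data = gmem_a;   VM A's bindings
	 *   fileB->private_data = gmem_b;   VM B's bindings (linked fd)
	 *   list_add(&gmem_a->entry, &inode->i_mapping->private_list);
	 *   list_add(&gmem_b->entry, &inode->i_mapping->private_list);
	 *
	 * Invalidation must then walk every instance, which is exactly what
	 * the loops removed by this patch do:
	 */
	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);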

Patch

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 4f3a313f5532..66dd9b55e85c 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -14,14 +14,19 @@  static struct vfsmount *kvm_gmem_mnt;
 struct kvm_gmem {
 	struct kvm *kvm;
 	struct xarray bindings;
-	struct list_head entry;
+	unsigned long flags;
 };
 
+static struct kvm_gmem *to_gmem(struct inode *inode)
+{
+	return inode->i_private;
+}
+
 static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	unsigned long huge_index = round_down(index, HPAGE_PMD_NR);
-	unsigned long flags = (unsigned long)inode->i_private;
+	unsigned long flags = to_gmem(inode)->flags;
 	struct address_space *mapping  = inode->i_mapping;
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	struct folio *folio;
@@ -134,26 +139,22 @@  static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
 
 static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
-	struct list_head *gmem_list = &inode->i_mapping->private_list;
+	struct address_space *mapping = inode->i_mapping;
+	struct kvm_gmem *gmem = to_gmem(inode);
 	pgoff_t start = offset >> PAGE_SHIFT;
 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
-	struct kvm_gmem *gmem;
 
 	/*
 	 * Bindings must be stable across invalidation to ensure the start+end
 	 * are balanced.
 	 */
-	filemap_invalidate_lock(inode->i_mapping);
-
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_begin(gmem, start, end);
+	filemap_invalidate_lock(mapping);
+	kvm_gmem_invalidate_begin(gmem, start, end);
 
 	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_end(gmem, start, end);
-
-	filemap_invalidate_unlock(inode->i_mapping);
+	kvm_gmem_invalidate_end(gmem, start, end);
+	filemap_invalidate_unlock(mapping);
 
 	return 0;
 }
@@ -231,7 +232,7 @@  static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
 
 static int kvm_gmem_release(struct inode *inode, struct file *file)
 {
-	struct kvm_gmem *gmem = file->private_data;
+	struct kvm_gmem *gmem = to_gmem(inode);
 	struct kvm_memory_slot *slot;
 	struct kvm *kvm = gmem->kvm;
 	unsigned long index;
@@ -260,8 +261,6 @@  static int kvm_gmem_release(struct inode *inode, struct file *file)
 	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
 	kvm_gmem_invalidate_end(gmem, 0, -1ul);
 
-	list_del(&gmem->entry);
-
 	filemap_invalidate_unlock(inode->i_mapping);
 
 	mutex_unlock(&kvm->slots_lock);
@@ -305,8 +304,7 @@  static int kvm_gmem_migrate_folio(struct address_space *mapping,
 
 static int kvm_gmem_error_page(struct address_space *mapping, struct page *page)
 {
-	struct list_head *gmem_list = &mapping->private_list;
-	struct kvm_gmem *gmem;
+	struct kvm_gmem *gmem = to_gmem(mapping->host);
 	pgoff_t start, end;
 
 	filemap_invalidate_lock_shared(mapping);
@@ -314,8 +312,7 @@  static int kvm_gmem_error_page(struct address_space *mapping, struct page *page)
 	start = page->index;
 	end = start + thp_nr_pages(page);
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_begin(gmem, start, end);
+	kvm_gmem_invalidate_begin(gmem, start, end);
 
 	/*
 	 * Do not truncate the range, what action is taken in response to the
@@ -326,8 +323,7 @@  static int kvm_gmem_error_page(struct address_space *mapping, struct page *page)
 	 * error to userspace.
 	 */
 
-	list_for_each_entry(gmem, gmem_list, entry)
-		kvm_gmem_invalidate_end(gmem, start, end);
+	kvm_gmem_invalidate_end(gmem, start, end);
 
 	filemap_invalidate_unlock_shared(mapping);
 
@@ -382,7 +378,6 @@  static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags,
 	if (err)
 		goto err_inode;
 
-	inode->i_private = (void *)(unsigned long)flags;
 	inode->i_op = &kvm_gmem_iops;
 	inode->i_mapping->a_ops = &kvm_gmem_aops;
 	inode->i_mode |= S_IFREG;
@@ -417,10 +412,9 @@  static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags,
 	kvm_get_kvm(kvm);
 	gmem->kvm = kvm;
 	xa_init(&gmem->bindings);
+	gmem->flags = flags;
 
-	file->private_data = gmem;
-
-	list_add(&gmem->entry, &inode->i_mapping->private_list);
+	inode->i_private = gmem;
 
 	fd_install(fd, file);
 	return fd;
@@ -476,12 +470,11 @@  int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
 	if (file->f_op != &kvm_gmem_fops)
 		goto err;
 
-	gmem = file->private_data;
+	inode = file_inode(file);
+	gmem = to_gmem(inode);
 	if (gmem->kvm != kvm)
 		goto err;
 
-	inode = file_inode(file);
-
 	if (offset < 0 || !PAGE_ALIGNED(offset))
 		return -EINVAL;
 
@@ -538,7 +531,7 @@  void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 	if (!file)
 		return;
 
-	gmem = file->private_data;
+	gmem = to_gmem(file_inode(file));
 
 	filemap_invalidate_lock(file->f_mapping);
 	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
@@ -563,7 +556,7 @@  int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	if (!file)
 		return -EFAULT;
 
-	gmem = file->private_data;
+	gmem = to_gmem(file_inode(file));
 
 	if (WARN_ON_ONCE(xa_load(&gmem->bindings, index) != slot)) {
 		r = -EIO;