Change the KVM guest_memfd APIs to pass file pointers instead of inodes
in the folio allocation functions. This is a preparatory patch for
adding NUMA support to guest memory allocations. The functional
behavior remains unchanged.

Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 virt/kvm/guest_memfd.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -91,7 +91,7 @@ static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index,
{
pgoff_t npages = 1UL << order;
pgoff_t huge_index = round_down(index, npages);
- struct address_space *mapping = inode->i_mapping;
+ struct address_space *mapping = inode->i_mapping;
gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NOWARN;
loff_t size = i_size_read(inode);
struct folio *folio;
@@ -125,16 +125,16 @@ static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index,
* Ignore accessed, referenced, and dirty flags. The memory is
* unevictable and there is no storage to write back to.
*/
-static struct folio *__kvm_gmem_get_folio(struct inode *inode, pgoff_t index,
+static struct folio *__kvm_gmem_get_folio(struct file *file, pgoff_t index,
bool allow_huge)
{
struct folio *folio = NULL;

if (gmem_2m_enabled && allow_huge)
- folio = kvm_gmem_get_huge_folio(inode, index, PMD_ORDER);
+ folio = kvm_gmem_get_huge_folio(file_inode(file), index, PMD_ORDER);

if (!folio)
- folio = filemap_grab_folio(inode->i_mapping, index);
+ folio = filemap_grab_folio(file_inode(file)->i_mapping, index);

pr_debug("%s: allocate folio with PFN %lx order %d\n",
__func__, folio_pfn(folio), folio_order(folio));
@@ -150,9 +150,9 @@ static struct folio *__kvm_gmem_get_folio(struct inode *inode, pgoff_t index,
* Ignore accessed, referenced, and dirty flags. The memory is
* unevictable and there is no storage to write back to.
*/
-static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
{
- return __kvm_gmem_get_folio(inode, index, true);
+ return __kvm_gmem_get_folio(file, index, true);
}

static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
@@ -228,8 +228,9 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
return 0;
}

-static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
+static long kvm_gmem_allocate(struct file *file, loff_t offset, loff_t len)
{
+ struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
pgoff_t start, index, end;
int r;
@@ -252,7 +253,7 @@ static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
break;
}

- folio = kvm_gmem_get_folio(inode, index);
+ folio = kvm_gmem_get_folio(file, index);
if (IS_ERR(folio)) {
r = PTR_ERR(folio);
break;
@@ -292,7 +293,7 @@ static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
if (mode & FALLOC_FL_PUNCH_HOLE)
ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
else
- ret = kvm_gmem_allocate(file_inode(file), offset, len);
+ ret = kvm_gmem_allocate(file, offset, len);

if (!ret)
file_modified(file);
@@ -626,7 +627,7 @@ __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
return ERR_PTR(-EIO);
}

- folio = __kvm_gmem_get_folio(file_inode(file), index, allow_huge);
+ folio = __kvm_gmem_get_folio(file, index, allow_huge);
if (IS_ERR(folio))
return folio;
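As an aside for readers without the tree handy: the refactoring shape
here is to thread the struct file through the allocation helpers and
derive the inode with file_inode() at the point of use, so a follow-up
can attach per-file state (such as a NUMA mempolicy) without another
round of signature churn. Below is a minimal user-space C sketch of
that shape; the struct definitions and kvm_gmem_allocate_sketch() are
simplified stand-ins for illustration, not the kernel's real types or
code.

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct inode and struct file. */
struct inode {
	unsigned long i_ino;
};

struct file {
	struct inode *f_inode;
};

/* Mirrors the kernel's file_inode() accessor (a trivial field read). */
static struct inode *file_inode(struct file *file)
{
	return file->f_inode;
}

/*
 * Hypothetical helper shaped like kvm_gmem_allocate() after this patch:
 * it takes the file and derives the inode itself, so later changes can
 * consult other per-file state without touching the signature again.
 */
static long kvm_gmem_allocate_sketch(struct file *file, long offset, long len)
{
	struct inode *inode = file_inode(file);

	printf("allocate [%ld, %ld) on inode %lu\n",
	       offset, offset + len, inode->i_ino);
	return 0;
}

int main(void)
{
	struct inode ino = { .i_ino = 42 };
	struct file f = { .f_inode = &ino };

	return (int)kvm_gmem_allocate_sketch(&f, 0, 4096);
}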