
[RFC,v4,3/3] KVM: guest_memfd: Enforce NUMA mempolicy using shared policy

Message ID 20250210063227.41125-4-shivankg@amd.com (mailing list archive)
State New
Series Add NUMA mempolicy support for KVM guest-memfd

Commit Message

Shivank Garg Feb. 10, 2025, 6:32 a.m. UTC
Previously, guest-memfd allocations simply followed the local NUMA node id
in the absence of a process mempolicy, resulting in unpredictable memory
placement. Moreover, mbind() couldn't be used since the memory wasn't
mapped to userspace in the VMM.

Enable NUMA policy support by implementing vm_ops for the guest-memfd mmap
operation. This allows the VMM to map the memory and use mbind() to set the
desired NUMA policy. The policy is then retrieved via
mpol_shared_policy_lookup() and passed to filemap_grab_folio_mpol() to
ensure that allocations follow the specified memory policy.

This enables the VMM to control guest memory NUMA placement by calling
mbind() on the mapped memory regions, providing fine-grained control over
guest memory allocation across NUMA nodes.

Suggested-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 virt/kvm/guest_memfd.c | 84 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 78 insertions(+), 6 deletions(-)
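
For illustration only (not part of the patch): a minimal VMM-side sketch of
the usage described above. It assumes the guest_memfd fd was obtained via
KVM_CREATE_GUEST_MEMFD and that the fd supports mmap() as introduced by this
series; the helper name, length and node are placeholders.

#include <numaif.h>
#include <sys/mman.h>

/* Hypothetical helper: bind the first 'len' bytes of guest memory to 'node'. */
static int bind_gmem_to_node(int gmem_fd, size_t len, int node)
{
	unsigned long nodemask = 1UL << node;
	void *addr;

	addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, gmem_fd, 0);
	if (addr == MAP_FAILED)
		return -1;

	/* Sets the shared policy that kvm_gmem_get_folio() later looks up. */
	if (mbind(addr, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask), 0))
		return -1;

	return 0;
}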

Comments

David Hildenbrand Feb. 12, 2025, 10:39 a.m. UTC | #1
On 10.02.25 07:32, Shivank Garg wrote:
> Previously, guest-memfd allocations were following local NUMA node id
> in absence of process mempolicy, resulting in random memory allocation.
> Moreover, mbind() couldn't be used since memory wasn't mapped to userspace
> in VMM.
> 
> Enable NUMA policy support by implementing vm_ops for guest-memfd mmap
> operation. This allows VMM to map the memory and use mbind() to set the
> desired NUMA policy. The policy is then retrieved via
> mpol_shared_policy_lookup() and passed to filemap_grab_folio_mpol() to
> ensure that allocations follow the specified memory policy.
> 
> This enables VMM to control guest memory NUMA placement by calling mbind()
> on the mapped memory regions, providing fine-grained control over guest
> memory allocation across NUMA nodes.

Yes, I think that is the right direction, especially with upcoming 
in-place conversion of shared<->private in mind.

> 
> Suggested-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Shivank Garg <shivankg@amd.com>
> ---
>   virt/kvm/guest_memfd.c | 84 +++++++++++++++++++++++++++++++++++++++---
>   1 file changed, 78 insertions(+), 6 deletions(-)
> 
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index b2aa6bf24d3a..e1ea8cb292fa 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -2,6 +2,7 @@
>   #include <linux/backing-dev.h>
>   #include <linux/falloc.h>
>   #include <linux/kvm_host.h>
> +#include <linux/mempolicy.h>
>   #include <linux/pagemap.h>
>   #include <linux/anon_inodes.h>
>   
> @@ -11,8 +12,13 @@ struct kvm_gmem {
>   	struct kvm *kvm;
>   	struct xarray bindings;
>   	struct list_head entry;
> +	struct shared_policy policy;
>   };
>   
> +static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
> +						   pgoff_t index,
> +						   pgoff_t *ilx);
> +
>   /**
>    * folio_file_pfn - like folio_file_page, but return a pfn.
>    * @folio: The folio which contains this index.
> @@ -96,10 +102,20 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
>    * Ignore accessed, referenced, and dirty flags.  The memory is
>    * unevictable and there is no storage to write back to.
>    */
> -static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
> +static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)

I'd probably do that change in a separate prep-patch; would remove some 
of the unrelated noise in this patch.

>   {
>   	/* TODO: Support huge pages. */
> -	return filemap_grab_folio(inode->i_mapping, index);
> +	struct folio *folio = NULL;

No need to init folio.

> +	struct inode *inode = file_inode(file);
> +	struct kvm_gmem *gmem = file->private_data;

Prefer reverse christmas-tree (longest line first) where possible.

> +	struct mempolicy *policy;
> +	pgoff_t ilx;

Why do you return the ilx from kvm_gmem_get_pgoff_policy() if it is 
completely unused?

> +
> +	policy = kvm_gmem_get_pgoff_policy(gmem, index, &ilx);
> +	folio =  filemap_grab_folio_mpol(inode->i_mapping, index, policy);
> +	mpol_cond_put(policy);

The downside is that we always have to look up the policy, even if we
don't have to allocate anything because the pagecache already contains a
folio.

Would there be a way to check if there is something already allocated
(fast path) and fall back to the slow path (look up the policy + call
filemap_grab_folio_mpol) only if that failed?

Note that shmem.c does exactly that: shmem_alloc_folio() is only called 
after filemap_get_entry() told us that there is nothing.

> +
> +	return folio;
>   }
>   

[...]

> +#ifdef CONFIG_NUMA
> +static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
> +{
> +	struct file *file = vma->vm_file;
> +	struct kvm_gmem *gmem = file->private_data;
> +
> +	return mpol_set_shared_policy(&gmem->policy, vma, new);
> +}
> +
> +static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
> +		unsigned long addr, pgoff_t *pgoff)
> +{
> +	struct file *file = vma->vm_file;
> +	struct kvm_gmem *gmem = file->private_data;
> +
> +	*pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
> +	return mpol_shared_policy_lookup(&gmem->policy, *pgoff);
> +}
> +
> +static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
> +						   pgoff_t index,
> +						   pgoff_t *ilx)
> +{
> +	struct mempolicy *mpol;
> +
> +	*ilx = NO_INTERLEAVE_INDEX;
> +	mpol = mpol_shared_policy_lookup(&gmem->policy, index);
> +	return mpol ? mpol : get_task_policy(current);
> +}
> +
> +static const struct vm_operations_struct kvm_gmem_vm_ops = {
> +	.get_policy	= kvm_gmem_get_policy,
> +	.set_policy	= kvm_gmem_set_policy,
> +};
> +
> +static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
> +{
> +	file_accessed(file);
> +	vma->vm_ops = &kvm_gmem_vm_ops;
> +	return 0;
> +}
> +#else
> +static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
> +						   pgoff_t index,
> +						   pgoff_t *ilx)
> +{
> +	*ilx = 0;
> +	return NULL;
> +}
> +#endif /* CONFIG_NUMA */
>   
>   static struct file_operations kvm_gmem_fops = {
> +#ifdef CONFIG_NUMA
> +	.mmap		= kvm_gmem_mmap,
> +#endif

With Fuad's work, this will be unconditional, and you'd only set the 
kvm_gmem_vm_ops conditionally -- just like shmem.c. Maybe best to 
prepare for that already: allow unconditional mmap (Fuad will implement 
the faulting logic for shared pages; until then all accesses would 
SIGBUS, I assume. Did you try that?) and only mess with 
get_policy/set_policy.
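
For concreteness, a rough sketch of that structure, loosely following
shmem.c -- untested and only one interpretation of the suggestion, with the
existing kvm_gmem_get_policy()/kvm_gmem_set_policy() handlers and fops
entries assumed unchanged:

/* mmap is always allowed; only the NUMA policy hooks are conditional. */
static const struct vm_operations_struct kvm_gmem_vm_ops = {
#ifdef CONFIG_NUMA
	.get_policy	= kvm_gmem_get_policy,
	.set_policy	= kvm_gmem_set_policy,
#endif
	/* no .fault yet, so any access to the mapping raises SIGBUS */
};

static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &kvm_gmem_vm_ops;
	return 0;
}

static struct file_operations kvm_gmem_fops = {
	.mmap		= kvm_gmem_mmap,	/* no longer under #ifdef CONFIG_NUMA */
	.open		= generic_file_open,
	.release	= kvm_gmem_release,
	.fallocate	= kvm_gmem_fallocate,
};
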
Shivank Garg Feb. 13, 2025, 6:27 p.m. UTC | #2
On 2/12/2025 4:09 PM, David Hildenbrand wrote:
> On 10.02.25 07:32, Shivank Garg wrote:
>> Previously, guest-memfd allocations were following local NUMA node id
>> in absence of process mempolicy, resulting in random memory allocation.
>> Moreover, mbind() couldn't be used since memory wasn't mapped to userspace
>> in VMM.
>>
>> Enable NUMA policy support by implementing vm_ops for guest-memfd mmap
>> operation. This allows VMM to map the memory and use mbind() to set the
>> desired NUMA policy. The policy is then retrieved via
>> mpol_shared_policy_lookup() and passed to filemap_grab_folio_mpol() to
>> ensure that allocations follow the specified memory policy.
>>
>> This enables VMM to control guest memory NUMA placement by calling mbind()
>> on the mapped memory regions, providing fine-grained control over guest
>> memory allocation across NUMA nodes.
> 
> Yes, I think that is the right direction, especially with upcoming in-place conversion of shared<->private in mind.
> 
>>
>> Suggested-by: David Hildenbrand <david@redhat.com>
>> Signed-off-by: Shivank Garg <shivankg@amd.com>
>> ---
>>   virt/kvm/guest_memfd.c | 84 +++++++++++++++++++++++++++++++++++++++---
>>   1 file changed, 78 insertions(+), 6 deletions(-)
>>
>> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
>> index b2aa6bf24d3a..e1ea8cb292fa 100644
>> --- a/virt/kvm/guest_memfd.c
>> +++ b/virt/kvm/guest_memfd.c
>> @@ -2,6 +2,7 @@
>>   #include <linux/backing-dev.h>
>>   #include <linux/falloc.h>
>>   #include <linux/kvm_host.h>
>> +#include <linux/mempolicy.h>
>>   #include <linux/pagemap.h>
>>   #include <linux/anon_inodes.h>
>>   @@ -11,8 +12,13 @@ struct kvm_gmem {
>>       struct kvm *kvm;
>>       struct xarray bindings;
>>       struct list_head entry;
>> +    struct shared_policy policy;
>>   };
>>   +static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
>> +                           pgoff_t index,
>> +                           pgoff_t *ilx);
>> +
>>   /**
>>    * folio_file_pfn - like folio_file_page, but return a pfn.
>>    * @folio: The folio which contains this index.
>> @@ -96,10 +102,20 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
>>    * Ignore accessed, referenced, and dirty flags.  The memory is
>>    * unevictable and there is no storage to write back to.
>>    */
>> -static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
>> +static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
> 
> I'd probably do that change in a separate prep-patch; would remove some of the unrelated noise in this patch.

Yes, I'll separate it.
> 
>>   {
>>       /* TODO: Support huge pages. */
>> -    return filemap_grab_folio(inode->i_mapping, index);
>> +    struct folio *folio = NULL;
> 
> No need to init folio.
> 
>> +    struct inode *inode = file_inode(file);
>> +    struct kvm_gmem *gmem = file->private_data;
> 
> Prefer reverse christmas-tree (longest line first) as possible.
> 
>> +    struct mempolicy *policy;
>> +    pgoff_t ilx;
> 
> Why do you return the ilx from kvm_gmem_get_pgoff_policy() if it is completely unused?
> 
>> +
>> +    policy = kvm_gmem_get_pgoff_policy(gmem, index, &ilx);
>> +    folio =  filemap_grab_folio_mpol(inode->i_mapping, index, policy);
>> +    mpol_cond_put(policy);
> 

I'll remove kvm_gmem_get_pgoff_policy().

> The downside is that we always have to look up the policy, even if we don't have to allocate anything because the pagecache already contains a folio.
> 
> Would there be a way to check if there is something already allocated (fast path) and fall back to the slow path (look up the policy + call filemap_grab_folio_mpol) only if that failed?
> 
> Note that shmem.c does exactly that: shmem_alloc_folio() is only called after filemap_get_entry() told us that there is nothing.
> 
Yes, it's doable.
A filemap_get_folio() call for the fast path: if it does not return a folio, fall back to the current slow path.
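A rough sketch of that (untested, still ignoring huge pages, and with
kvm_gmem_get_pgoff_policy() folded away as mentioned above; it relies on
filemap_grab_folio_mpol() added earlier in this series, and uses
__filemap_get_folio() with FGP_LOCK so the fast path also hands back a
locked folio, matching what filemap_grab_folio() gives the callers):

static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
{
	struct address_space *mapping = file_inode(file)->i_mapping;
	struct kvm_gmem *gmem = file->private_data;
	struct mempolicy *policy;
	struct folio *folio;

	/* Fast path: folio already in the page cache, no policy lookup needed. */
	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
	if (!IS_ERR(folio))
		return folio;

	/* Slow path: look up the policy only when we may have to allocate. */
	policy = mpol_shared_policy_lookup(&gmem->policy, index);
	if (!policy)
		policy = get_task_policy(current);

	folio = filemap_grab_folio_mpol(mapping, index, policy);
	mpol_cond_put(policy);
	return folio;
}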

>> +
>> +    return folio;
...

>> +}
>> +#endif /* CONFIG_NUMA */
>>     static struct file_operations kvm_gmem_fops = {
>> +#ifdef CONFIG_NUMA
>> +    .mmap        = kvm_gmem_mmap,
>> +#endif
> 
> With Fuad's work, this will be unconditional, and you'd only set the kvm_gmem_vm_ops conditionally -- just like shmem.c. Maybe best to prepare for that already: allow unconditional mmap (Fuad will implement the faulting logic of shared pages, until then all accesses would SIGBUS I assume, did you try that?) and only mess with get_policy/set_policy.

Yes, I'll change it accordingly.
I'll have to try that out.

Thanks,
Shivank

Patch

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index b2aa6bf24d3a..e1ea8cb292fa 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -2,6 +2,7 @@ 
 #include <linux/backing-dev.h>
 #include <linux/falloc.h>
 #include <linux/kvm_host.h>
+#include <linux/mempolicy.h>
 #include <linux/pagemap.h>
 #include <linux/anon_inodes.h>
 
@@ -11,8 +12,13 @@  struct kvm_gmem {
 	struct kvm *kvm;
 	struct xarray bindings;
 	struct list_head entry;
+	struct shared_policy policy;
 };
 
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+						   pgoff_t index,
+						   pgoff_t *ilx);
+
 /**
  * folio_file_pfn - like folio_file_page, but return a pfn.
  * @folio: The folio which contains this index.
@@ -96,10 +102,20 @@  static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
  * Ignore accessed, referenced, and dirty flags.  The memory is
  * unevictable and there is no storage to write back to.
  */
-static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
 {
 	/* TODO: Support huge pages. */
-	return filemap_grab_folio(inode->i_mapping, index);
+	struct folio *folio = NULL;
+	struct inode *inode = file_inode(file);
+	struct kvm_gmem *gmem = file->private_data;
+	struct mempolicy *policy;
+	pgoff_t ilx;
+
+	policy = kvm_gmem_get_pgoff_policy(gmem, index, &ilx);
+	folio =  filemap_grab_folio_mpol(inode->i_mapping, index, policy);
+	mpol_cond_put(policy);
+
+	return folio;
 }
 
 static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
@@ -177,8 +193,9 @@  static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	return 0;
 }
 
-static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
+static long kvm_gmem_allocate(struct file *file, loff_t offset, loff_t len)
 {
+	struct inode *inode = file_inode(file);
 	struct address_space *mapping = inode->i_mapping;
 	pgoff_t start, index, end;
 	int r;
@@ -201,7 +218,7 @@  static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
 			break;
 		}
 
-		folio = kvm_gmem_get_folio(inode, index);
+		folio = kvm_gmem_get_folio(file, index);
 		if (IS_ERR(folio)) {
 			r = PTR_ERR(folio);
 			break;
@@ -241,7 +258,7 @@  static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
 	if (mode & FALLOC_FL_PUNCH_HOLE)
 		ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
 	else
-		ret = kvm_gmem_allocate(file_inode(file), offset, len);
+		ret = kvm_gmem_allocate(file, offset, len);
 
 	if (!ret)
 		file_modified(file);
@@ -290,6 +307,7 @@  static int kvm_gmem_release(struct inode *inode, struct file *file)
 	mutex_unlock(&kvm->slots_lock);
 
 	xa_destroy(&gmem->bindings);
+	mpol_free_shared_policy(&gmem->policy);
 	kfree(gmem);
 
 	kvm_put_kvm(kvm);
@@ -311,8 +329,61 @@  static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	return gfn - slot->base_gfn + slot->gmem.pgoff;
 }
+#ifdef CONFIG_NUMA
+static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+{
+	struct file *file = vma->vm_file;
+	struct kvm_gmem *gmem = file->private_data;
+
+	return mpol_set_shared_policy(&gmem->policy, vma, new);
+}
+
+static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
+		unsigned long addr, pgoff_t *pgoff)
+{
+	struct file *file = vma->vm_file;
+	struct kvm_gmem *gmem = file->private_data;
+
+	*pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+	return mpol_shared_policy_lookup(&gmem->policy, *pgoff);
+}
+
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+						   pgoff_t index,
+						   pgoff_t *ilx)
+{
+	struct mempolicy *mpol;
+
+	*ilx = NO_INTERLEAVE_INDEX;
+	mpol = mpol_shared_policy_lookup(&gmem->policy, index);
+	return mpol ? mpol : get_task_policy(current);
+}
+
+static const struct vm_operations_struct kvm_gmem_vm_ops = {
+	.get_policy	= kvm_gmem_get_policy,
+	.set_policy	= kvm_gmem_set_policy,
+};
+
+static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	file_accessed(file);
+	vma->vm_ops = &kvm_gmem_vm_ops;
+	return 0;
+}
+#else
+static struct mempolicy *kvm_gmem_get_pgoff_policy(struct kvm_gmem *gmem,
+						   pgoff_t index,
+						   pgoff_t *ilx)
+{
+	*ilx = 0;
+	return NULL;
+}
+#endif /* CONFIG_NUMA */
 
 static struct file_operations kvm_gmem_fops = {
+#ifdef CONFIG_NUMA
+	.mmap		= kvm_gmem_mmap,
+#endif
 	.open		= generic_file_open,
 	.release	= kvm_gmem_release,
 	.fallocate	= kvm_gmem_fallocate,
@@ -445,6 +516,7 @@  static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
 	kvm_get_kvm(kvm);
 	gmem->kvm = kvm;
 	xa_init(&gmem->bindings);
+	mpol_shared_policy_init(&gmem->policy, NULL);
 	list_add(&gmem->entry, &inode->i_mapping->i_private_list);
 
 	fd_install(fd, file);
@@ -585,7 +657,7 @@  static struct folio *__kvm_gmem_get_pfn(struct file *file,
 		return ERR_PTR(-EIO);
 	}
 
-	folio = kvm_gmem_get_folio(file_inode(file), index);
+	folio = kvm_gmem_get_folio(file, index);
 	if (IS_ERR(folio))
 		return folio;