diff mbox series

[v3,2/6] mm: provide can_userfault vma operation

Message ID 20250404154352.23078-3-kalyazin@amazon.com (mailing list archive)
State New
Headers show
Series KVM: guest_memfd: support for uffd minor | expand

Commit Message

Nikita Kalyazin April 4, 2025, 3:43 p.m. UTC
The new operation allows decoupling the userfaultfd code from
dependencies on VMA types, specifically, shmem and hugetlb.  The
vm_flags bitmap argument is processed with "any" logic, meaning if the
VMA type supports any of the flags set, it returns true.  This is to
avoid multiple calls when checking for __VM_UFFD_FLAGS.

Signed-off-by: Nikita Kalyazin <kalyazin@amazon.com>
---
 include/linux/mm.h | 5 +++++
 mm/hugetlb.c       | 7 +++++++
 mm/shmem.c         | 8 ++++++++
 3 files changed, 20 insertions(+)
diff mbox series

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8483e09aeb2c..488d721d8542 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -680,6 +680,11 @@  struct vm_operations_struct {
 	 */
 	struct page *(*find_special_page)(struct vm_area_struct *vma,
 					  unsigned long addr);
+	/*
+	 * True if the VMA supports userfault at least for one of the vm_flags
+	 */
+	bool (*can_userfault)(struct vm_area_struct *vma,
+			      unsigned long vm_flags);
 };
 
 #ifdef CONFIG_NUMA_BALANCING
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c004cfdcd4e2..f3901c11e1fd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5143,6 +5143,12 @@  static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
 	return huge_page_size(hstate_vma(vma));
 }
 
+static bool hugetlb_vm_op_can_userfault(struct vm_area_struct *vma,
+					unsigned long vm_flags)
+{
+	return true;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -5168,6 +5174,7 @@  const struct vm_operations_struct hugetlb_vm_ops = {
 	.close = hugetlb_vm_op_close,
 	.may_split = hugetlb_vm_op_split,
 	.pagesize = hugetlb_vm_op_pagesize,
+	.can_userfault = hugetlb_vm_op_can_userfault,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
diff --git a/mm/shmem.c b/mm/shmem.c
index b4159303fe59..0b9e19abd1e9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2891,6 +2891,12 @@  static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
 }
 
+static bool shmem_can_userfault(struct vm_area_struct *vma,
+				unsigned long vm_flags)
+{
+	return true;
+}
+
 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
 			pgoff_t index, unsigned int order, pgoff_t *ilx)
 {
@@ -5309,6 +5315,7 @@  static const struct vm_operations_struct shmem_vm_ops = {
 	.set_policy     = shmem_set_policy,
 	.get_policy     = shmem_get_policy,
 #endif
+	.can_userfault  = shmem_can_userfault,
 };
 
 static const struct vm_operations_struct shmem_anon_vm_ops = {
@@ -5318,6 +5325,7 @@  static const struct vm_operations_struct shmem_anon_vm_ops = {
 	.set_policy     = shmem_set_policy,
 	.get_policy     = shmem_get_policy,
 #endif
+	.can_userfault  = shmem_can_userfault,
 };
 
 int shmem_init_fs_context(struct fs_context *fc)