[v2,3/9] mm: Move FAULT_FLAG_VMA_LOCK check from handle_mm_fault()

Message ID 20230711202047.3818697-4-willy@infradead.org (mailing list archive)
State New, archived
Series Avoid the mmap lock for fault-around

Commit Message

Matthew Wilcox July 11, 2023, 8:20 p.m. UTC
Handle a little more of the page fault path outside the mmap sem.
The hugetlb path doesn't need to check whether the VMA is anonymous;
the VM_HUGETLB flag is only set on hugetlbfs VMAs.  There should be no
performance change from the previous commit; this is simply a step to
ease bisection of any problems.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/hugetlb.c |  6 ++++++
 mm/memory.c  | 18 +++++++++---------
 2 files changed, 15 insertions(+), 9 deletions(-)
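
For context, the bail-outs this patch adds rely on the caller-side convention for per-VMA-lock faults: when the fault path cannot proceed under the VMA lock it releases the lock with vma_end_read() and returns VM_FAULT_RETRY, and the arch fault handler falls back to the mmap_lock. The code below is only a simplified sketch of that convention, loosely modelled on the arch handlers that already use lock_vma_under_rcu() (CONFIG_PER_VMA_LOCK); the function name fault_sketch is made up, and accounting, signal handling and the FAULT_FLAG_TRIED retry loop are omitted.

/*
 * Simplified sketch of the caller-side convention: try the fault under
 * the per-VMA read lock first, fall back to the mmap_lock when the fault
 * path returns VM_FAULT_RETRY.  Illustrative only.
 */
static void fault_sketch(struct mm_struct *mm, unsigned long address,
                         unsigned int flags, struct pt_regs *regs)
{
        struct vm_area_struct *vma;
        vm_fault_t fault;

        vma = lock_vma_under_rcu(mm, address);
        if (!vma)
                goto lock_mmap;

        /* First attempt: only the per-VMA read lock is held. */
        fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
        if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
                vma_end_read(vma);
        if (!(fault & VM_FAULT_RETRY))
                return;         /* handled (or failed) under the VMA lock */

        /*
         * VM_FAULT_RETRY: the fault path released the VMA lock itself;
         * after this patch that includes hugetlb_fault() and, in
         * __handle_mm_fault(), any non-anonymous VMA.  Retry under the
         * mmap_lock.
         */
lock_mmap:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
        if (!vma) {
                mmap_read_unlock(mm);
                return;         /* bad address; signal delivery omitted */
        }
        fault = handle_mm_fault(vma, address, flags, regs);
        if (!(fault & VM_FAULT_RETRY))
                mmap_read_unlock(mm);
}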

Comments

Suren Baghdasaryan July 14, 2023, 3:04 a.m. UTC | #1
On Tue, Jul 11, 2023 at 1:20 PM Matthew Wilcox (Oracle)
<willy@infradead.org> wrote:
>
> Handle a little more of the page fault path outside the mmap sem.
> The hugetlb path doesn't need to check whether the VMA is anonymous;
> the VM_HUGETLB flag is only set on hugetlbfs VMAs.  There should be no
> performance change from the previous commit; this is simply a step to
> ease bisection of any problems.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Suren Baghdasaryan <surenb@google.com>

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e4a28ce0667f..109e1ff92bc8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6063,6 +6063,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int need_wait_lock = 0;
 	unsigned long haddr = address & huge_page_mask(h);
 
+	/* TODO: Handle faults under the VMA lock */
+	if (flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	/*
 	 * Serialize hugepage allocation and instantiation, so that we don't
 	 * get spurious allocation failures if two CPUs race to instantiate
diff --git a/mm/memory.c b/mm/memory.c
index f2dcc695f54e..6eda5c5f2069 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4998,10 +4998,10 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 }
 
 /*
- * By the time we get here, we already hold the mm semaphore
- *
- * The mmap_lock may have been released depending on flags and our
- * return value.  See filemap_fault() and __folio_lock_or_retry().
+ * On entry, we hold either the VMA lock or the mmap_lock
+ * (FAULT_FLAG_VMA_LOCK tells you which).  If VM_FAULT_RETRY is set in
+ * the result, the mmap_lock is not held on exit.  See filemap_fault()
+ * and __folio_lock_or_retry().
  */
 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
@@ -5020,6 +5020,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	p4d_t *p4d;
 	vm_fault_t ret;
 
+	if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	pgd = pgd_offset(mm, address);
 	p4d = p4d_alloc(mm, pgd, address);
 	if (!p4d)
@@ -5247,11 +5252,6 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 		goto out;
 	}
 
-	if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
-		vma_end_read(vma);
-		return VM_FAULT_RETRY;
-	}
-
 	/*
 	 * Enable the memcg OOM handling for faults triggered in user
 	 * space.  Kernel faults are handled more gracefully.
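
A condensed view (not the literal mm/memory.c source) of why the check has to be duplicated into both callees once it is dropped from handle_mm_fault(): handle_mm_fault() dispatches hugetlb VMAs to hugetlb_fault() and everything else to __handle_mm_fault(), so each callee now performs its own early return. The helper name below is illustrative, and the sanitising and accounting around the calls are omitted.

/*
 * Condensed sketch of the dispatch inside handle_mm_fault().
 */
static vm_fault_t dispatch_sketch(struct vm_area_struct *vma,
                                  unsigned long address, unsigned int flags)
{
        if (unlikely(is_vm_hugetlb_page(vma)))
                /* now bails out early under FAULT_FLAG_VMA_LOCK (this patch) */
                return hugetlb_fault(vma->vm_mm, vma, address, flags);

        /* now bails out for FAULT_FLAG_VMA_LOCK on non-anonymous VMAs */
        return __handle_mm_fault(vma, address, flags);
}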