diff mbox series

[RFC] Trylock the mmap_lock in vmf_anon_prepare()

Message ID Zh1GyNht6KKyRMuc@casper.infradead.org (mailing list archive)
State New
Headers show
Series [RFC] Trylock the mmap_lock in vmf_anon_prepare() | expand

Commit Message

Matthew Wilcox April 15, 2024, 3:24 p.m. UTC
It seems a shame to fall back if the mmap_lock is readily available.
It doesn't blow up immediately in my testing ... thoughts?

From 70833af858ce41a80b5f0669648ee9f59b149a03 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Mon, 15 Apr 2024 11:13:41 -0400
Subject: [PATCH] mm: Optimise vmf_anon_prepare() for VMAs without an anon_vma

If the mmap_lock can be taken for read, there's no reason not to take
it so that we can call __anon_vma_prepare().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

Comments

Suren Baghdasaryan April 15, 2024, 4:14 p.m. UTC | #1
On Mon, Apr 15, 2024 at 8:24 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> It seems a shame to fall back if the mmap_lock is readily available.
> It doesn't blow up immediately in my testing ... thoughts?

I can't think of anything that would break...

>
> From 70833af858ce41a80b5f0669648ee9f59b149a03 Mon Sep 17 00:00:00 2001
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> Date: Mon, 15 Apr 2024 11:13:41 -0400
> Subject: [PATCH] mm: Optimise vmf_anon_prepare() for VMAs without an anon_vma
>
> If the mmap_lock can be taken for read, there's no reason not to take
> it so that we can call __anon_vma_prepare().
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/memory.c | 8 ++++++--
>  1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index b9c23393fa9b..7075fb7f74da 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3219,11 +3219,15 @@ vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
>         if (likely(vma->anon_vma))
>                 return 0;
>         if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> -               vma_end_read(vma);
> -               return VM_FAULT_RETRY;
> +               if (!mmap_read_trylock(vma->vm_mm)) {
> +                       vma_end_read(vma);
> +                       return VM_FAULT_RETRY;
> +               }
>         }
>         if (__anon_vma_prepare(vma))
>                 return VM_FAULT_OOM;

You should drop mmap_lock when returning VM_FAULT_OOM as well.

> +       if (vmf->flags & FAULT_FLAG_VMA_LOCK)
> +               mmap_read_unlock(vma->vm_mm);
>         return 0;
>  }
>
> --
> 2.43.0
>
>
Jann Horn April 15, 2024, 4:45 p.m. UTC | #2
On Mon, Apr 15, 2024 at 5:24 PM Matthew Wilcox <willy@infradead.org> wrote:
> It seems a shame to fall back if the mmap_lock is readily available.
> It doesn't blow up immediately in my testing ... thoughts?

Seems fine to me, since nothing below the trylocked mmap_lock does
vma_start_*().

> >From 70833af858ce41a80b5f0669648ee9f59b149a03 Mon Sep 17 00:00:00 2001
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> Date: Mon, 15 Apr 2024 11:13:41 -0400
> Subject: [PATCH] mm: Optimise vmf_anon_prepare() for VMAs without an anon_vma
>
> If the mmap_lock can be taken for read, there's no reason not to take
> it so that we can call __anon_vma_prepare().
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Jann Horn <jannh@google.com>

> ---
>  mm/memory.c | 8 ++++++--
>  1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index b9c23393fa9b..7075fb7f74da 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3219,11 +3219,15 @@ vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
>         if (likely(vma->anon_vma))
>                 return 0;
>         if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> -               vma_end_read(vma);
> -               return VM_FAULT_RETRY;
> +               if (!mmap_read_trylock(vma->vm_mm)) {
> +                       vma_end_read(vma);
> +                       return VM_FAULT_RETRY;
> +               }
>         }
>         if (__anon_vma_prepare(vma))
>                 return VM_FAULT_OOM;
> +       if (vmf->flags & FAULT_FLAG_VMA_LOCK)
> +               mmap_read_unlock(vma->vm_mm);
>         return 0;
>  }
>
> --
> 2.43.0
Matthew Wilcox April 15, 2024, 6:18 p.m. UTC | #3
On Mon, Apr 15, 2024 at 09:14:10AM -0700, Suren Baghdasaryan wrote:
> >         if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> > -               vma_end_read(vma);
> > -               return VM_FAULT_RETRY;
> > +               if (!mmap_read_trylock(vma->vm_mm)) {
> > +                       vma_end_read(vma);
> > +                       return VM_FAULT_RETRY;
> > +               }
> >         }
> >         if (__anon_vma_prepare(vma))
> >                 return VM_FAULT_OOM;
> 
> You should drop mmap_lock when returning VM_FAULT_OOM as well.
> 
> > +       if (vmf->flags & FAULT_FLAG_VMA_LOCK)
> > +               mmap_read_unlock(vma->vm_mm);
> >         return 0;
> >  }

Thanks.  Fixed and pushed to
git://git.infradead.org/users/willy/pagecache.git vma-lock

+++ b/mm/memory.c
@@ -3224,16 +3224,21 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
 vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
+       vm_fault_t ret = 0;

        if (likely(vma->anon_vma))
                return 0;
        if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-               vma_end_read(vma);
-               return VM_FAULT_RETRY;
+               if (!mmap_read_trylock(vma->vm_mm)) {
+                       vma_end_read(vma);
+                       return VM_FAULT_RETRY;
+               }
        }
        if (__anon_vma_prepare(vma))
-               return VM_FAULT_OOM;
-       return 0;
+               ret = VM_FAULT_OOM;
+       if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+               mmap_read_unlock(vma->vm_mm);
+       return ret;
 }

 /*
Suren Baghdasaryan April 15, 2024, 8:59 p.m. UTC | #4
On Mon, Apr 15, 2024 at 11:18 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Mon, Apr 15, 2024 at 09:14:10AM -0700, Suren Baghdasaryan wrote:
> > >         if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> > > -               vma_end_read(vma);
> > > -               return VM_FAULT_RETRY;
> > > +               if (!mmap_read_trylock(vma->vm_mm)) {
> > > +                       vma_end_read(vma);
> > > +                       return VM_FAULT_RETRY;
> > > +               }
> > >         }
> > >         if (__anon_vma_prepare(vma))
> > >                 return VM_FAULT_OOM;
> >
> > You should drop mmap_lock when returning VM_FAULT_OOM as well.
> >
> > > +       if (vmf->flags & FAULT_FLAG_VMA_LOCK)
> > > +               mmap_read_unlock(vma->vm_mm);
> > >         return 0;
> > >  }
>
> Thanks.  Fixed and pushed to
> git://git.infradead.org/users/willy/pagecache.git vma-lock

That looks correct now.
Reviewed-by: Suren Baghdasaryan <surenb@google.com>

>
> +++ b/mm/memory.c
> @@ -3224,16 +3224,21 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
>  vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
>  {
>         struct vm_area_struct *vma = vmf->vma;
> +       vm_fault_t ret = 0;
>
>         if (likely(vma->anon_vma))
>                 return 0;
>         if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> -               vma_end_read(vma);
> -               return VM_FAULT_RETRY;
> +               if (!mmap_read_trylock(vma->vm_mm)) {
> +                       vma_end_read(vma);
> +                       return VM_FAULT_RETRY;
> +               }
>         }
>         if (__anon_vma_prepare(vma))
> -               return VM_FAULT_OOM;
> -       return 0;
> +               ret = VM_FAULT_OOM;
> +       if (vmf->flags & FAULT_FLAG_VMA_LOCK)
> +               mmap_read_unlock(vma->vm_mm);
> +       return ret;
>  }
>
>  /*
>
diff mbox series

Patch

diff --git a/mm/memory.c b/mm/memory.c
index b9c23393fa9b..7075fb7f74da 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3219,11 +3219,15 @@  vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
 	if (likely(vma->anon_vma))
 		return 0;
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		vma_end_read(vma);
-		return VM_FAULT_RETRY;
+		if (!mmap_read_trylock(vma->vm_mm)) {
+			vma_end_read(vma);
+			return VM_FAULT_RETRY;
+		}
 	}
 	if (__anon_vma_prepare(vma))
 		return VM_FAULT_OOM;
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+		mmap_read_unlock(vma->vm_mm);
 	return 0;
 }