
[v3,3/8] mm: drop per-VMA lock in handle_mm_fault if retrying or when finished

Message ID: 20230627042321.1763765-4-surenb@google.com
State: New
Series: Per-VMA lock support for swap and userfaults

Commit Message

Suren Baghdasaryan June 27, 2023, 4:23 a.m. UTC
handle_mm_fault returning VM_FAULT_RETRY or VM_FAULT_COMPLETED means
the mmap_lock has been released. However, with per-VMA locks the
behavior is different: the caller is still expected to release the
per-VMA lock. To make the rules consistent for the caller, drop the
per-VMA lock before returning from handle_mm_fault when the page
fault should be retried or is completed.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 arch/arm64/mm/fault.c   |  3 ++-
 arch/powerpc/mm/fault.c |  3 ++-
 arch/s390/mm/fault.c    |  3 ++-
 arch/x86/mm/fault.c     |  3 ++-
 mm/memory.c             | 12 +++++++++++-
 5 files changed, 19 insertions(+), 5 deletions(-)
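
For reference, the caller-side rule this patch establishes can be
summarized in a short sketch (modeled on the arch hunks in the patch
below; it illustrates the contract, not any single handler's literal
code):

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	/*
	 * VM_FAULT_RETRY and VM_FAULT_COMPLETED already meant "mmap_lock
	 * was released inside handle_mm_fault()". After this patch the
	 * same two return values also mean the per-VMA lock was dropped
	 * internally, so the caller unlocks only on the other outcomes.
	 */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);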

Comments

Peter Xu June 27, 2023, 3:27 p.m. UTC | #1
On Mon, Jun 26, 2023 at 09:23:16PM -0700, Suren Baghdasaryan wrote:
> handle_mm_fault returning VM_FAULT_RETRY or VM_FAULT_COMPLETED means
> the mmap_lock has been released. However, with per-VMA locks the
> behavior is different: the caller is still expected to release the
> per-VMA lock. To make the rules consistent for the caller, drop the
> per-VMA lock before returning from handle_mm_fault when the page
> fault should be retried or is completed.
> 
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
> [...]
> diff --git a/mm/memory.c b/mm/memory.c
> index f69fbc251198..9011ad63c41b 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5086,7 +5086,17 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
>  		}
>  	}
>  
> -	return handle_pte_fault(&vmf);
> +	ret = handle_pte_fault(&vmf);
> +	if (ret & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)) {
> +		/*
> +		 * In case of VM_FAULT_RETRY or VM_FAULT_COMPLETED we might
> +		 * be still holding per-VMA lock to keep the vma stable as long
> +		 * as possible. Drop it before returning.
> +		 */
> +		if (vmf.flags & FAULT_FLAG_VMA_LOCK)
> +			vma_end_read(vma);
> +	}

This smells hackish... I'd think it better to just release the lock at
the place where we return RETRY, and AFAIU swap is the only place the
vma lock path returns RETRY with the current code base?

do_swap_page():
        if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+               vma_end_read(vma);
                ret = VM_FAULT_RETRY;
                goto out;
        }

I.e., I don't think VM_FAULT_COMPLETED can even be returned on the vma
lock paths yet, as they don't support VM_SHARED.
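
A slightly fuller sketch of that alternative (assuming, as said above,
that this do_swap_page() bail-out is currently the only per-VMA-lock
path that returns RETRY; illustrative, not merged code):

	/* in do_swap_page(), mm/memory.c */
	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
		/*
		 * Swap-in is not yet supported under the per-VMA lock:
		 * drop the lock right where RETRY originates instead of
		 * in a catch-all at the end of __handle_mm_fault().
		 */
		vma_end_read(vma);
		ret = VM_FAULT_RETRY;
		goto out;
	}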
Suren Baghdasaryan June 27, 2023, 4:25 p.m. UTC | #2
On Tue, Jun 27, 2023 at 8:28 AM Peter Xu <peterx@redhat.com> wrote:
>
> On Mon, Jun 26, 2023 at 09:23:16PM -0700, Suren Baghdasaryan wrote:
> > handle_mm_fault returning VM_FAULT_RETRY or VM_FAULT_COMPLETED means
> > the mmap_lock has been released. However, with per-VMA locks the
> > behavior is different: the caller is still expected to release the
> > per-VMA lock. To make the rules consistent for the caller, drop the
> > per-VMA lock before returning from handle_mm_fault when the page
> > fault should be retried or is completed.
> >
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> > [...]
> > diff --git a/mm/memory.c b/mm/memory.c
> > index f69fbc251198..9011ad63c41b 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -5086,7 +5086,17 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
> >               }
> >       }
> >
> > -     return handle_pte_fault(&vmf);
> > +     ret = handle_pte_fault(&vmf);
> > +     if (ret & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)) {
> > +             /*
> > +              * In case of VM_FAULT_RETRY or VM_FAULT_COMPLETED we might
> > +              * be still holding per-VMA lock to keep the vma stable as long
> > +              * as possible. Drop it before returning.
> > +              */
> > +             if (vmf.flags & FAULT_FLAG_VMA_LOCK)
> > +                     vma_end_read(vma);
> > +     }
>
> This smells hackish... I'd think it better to just release the lock at
> the place where we return RETRY, and AFAIU swap is the only place the
> vma lock path returns RETRY with the current code base?
>
> do_swap_page():
>         if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> +               vma_end_read(vma);
>                 ret = VM_FAULT_RETRY;
>                 goto out;
>         }
>
> I.e., I don't think VM_FAULT_COMPLETED can even be returned on the vma
> lock paths yet, as they don't support VM_SHARED.

Ack.

>
> --
> Peter Xu
>

Patch

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6045a5117ac1..89f84e9ea1ff 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -601,7 +601,8 @@  static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 		goto lock_mmap;
 	}
 	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
-	vma_end_read(vma);
+	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+		vma_end_read(vma);
 
 	if (!(fault & VM_FAULT_RETRY)) {
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 531177a4ee08..4697c5dca31c 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -494,7 +494,8 @@  static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 
 	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
-	vma_end_read(vma);
+	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+		vma_end_read(vma);
 
 	if (!(fault & VM_FAULT_RETRY)) {
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index b65144c392b0..cccefe41038b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -418,7 +418,8 @@  static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 		goto lock_mmap;
 	}
 	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
-	vma_end_read(vma);
+	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+		vma_end_read(vma);
 	if (!(fault & VM_FAULT_RETRY)) {
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
 		goto out;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e4399983c50c..d69c85c1c04e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1347,7 +1347,8 @@  void do_user_addr_fault(struct pt_regs *regs,
 		goto lock_mmap;
 	}
 	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
-	vma_end_read(vma);
+	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+		vma_end_read(vma);
 
 	if (!(fault & VM_FAULT_RETRY)) {
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
diff --git a/mm/memory.c b/mm/memory.c
index f69fbc251198..9011ad63c41b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5086,7 +5086,17 @@  static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		}
 	}
 
-	return handle_pte_fault(&vmf);
+	ret = handle_pte_fault(&vmf);
+	if (ret & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)) {
+		/*
+		 * In case of VM_FAULT_RETRY or VM_FAULT_COMPLETED we might
+		 * be still holding per-VMA lock to keep the vma stable as long
+		 * as possible. Drop it before returning.
+		 */
+		if (vmf.flags & FAULT_FLAG_VMA_LOCK)
+			vma_end_read(vma);
+	}
+	return ret;
 }
 
 /**