
[RFC,RESEND,09/28] mm/mempolicy: mark VMA as locked when changing protection policy

Message ID 20220901173516.702122-10-surenb@google.com (mailing list archive)
State New, archived
Series per-VMA locks proposal

Commit Message

Suren Baghdasaryan Sept. 1, 2022, 5:34 p.m. UTC
Protect VMA from concurrent page fault handler while performing VMA
protection policy changes.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 mm/mempolicy.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
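For context on what vma_mark_locked() does: under this proposal, a writer
that already holds mmap_lock for writing marks each VMA it is about to
modify, and page fault handlers, which may run without mmap_lock, detect
the mark and fall back to the mmap_lock path. A minimal sketch of that
protocol follows; the field names match the series, but the reader-side
helper and the memory-ordering details are simplified assumptions, not
the exact code from these patches.

/*
 * Simplified sketch of the per-VMA locking protocol this series builds
 * on. vma_mark_locked() is the series' API; vma_read_trylock() is an
 * illustrative stand-in for the fault handler's check.
 */
static inline void vma_mark_locked(struct vm_area_struct *vma)
{
	/* Writers must already hold mmap_lock exclusively. */
	mmap_assert_write_locked(vma->vm_mm);
	/* Matching sequence numbers mean "this VMA is write-locked". */
	WRITE_ONCE(vma->vm_lock_seq, vma->vm_mm->mm_lock_seq);
}

/* Fault-handler side: can this VMA be handled without mmap_lock? */
static inline bool vma_read_trylock(struct vm_area_struct *vma)
{
	return READ_ONCE(vma->vm_lock_seq) !=
	       READ_ONCE(vma->vm_mm->mm_lock_seq);
}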

Comments

Laurent Dufour Sept. 6, 2022, 2:47 p.m. UTC | #1
On 01/09/2022 at 19:34, Suren Baghdasaryan wrote:
> Protect VMA from concurrent page fault handler while performing VMA
> protection policy changes.
> 
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  mm/mempolicy.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index b73d3248d976..6be1e5c75556 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -383,8 +383,10 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
>  	struct vm_area_struct *vma;
>  
>  	mmap_write_lock(mm);
> -	for (vma = mm->mmap; vma; vma = vma->vm_next)
> +	for (vma = mm->mmap; vma; vma = vma->vm_next) {
> +		vma_mark_locked(vma);
>  		mpol_rebind_policy(vma->vm_policy, new);
> +	}
>  	mmap_write_unlock(mm);
>  }
>  
> @@ -632,6 +634,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
>  	struct mmu_gather tlb;
>  	int nr_updated;
>  
> +	vma_mark_locked(vma);

If I understand that correctly, the VMA itself is not impacted, only the
PMDs/PTEs, and they are protected using the page table locks.

Am I missing something?

>  	tlb_gather_mmu(&tlb, vma->vm_mm);
>  
>  	nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
> @@ -765,6 +768,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
>  	if (IS_ERR(new))
>  		return PTR_ERR(new);
>  
> +	vma_mark_locked(vma);
>  	if (vma->vm_ops && vma->vm_ops->set_policy) {
>  		err = vma->vm_ops->set_policy(vma, new);
>  		if (err)
Suren Baghdasaryan Sept. 9, 2022, 12:27 a.m. UTC | #2
On Tue, Sep 6, 2022 at 7:48 AM Laurent Dufour <ldufour@linux.ibm.com> wrote:
>
> On 01/09/2022 at 19:34, Suren Baghdasaryan wrote:
> > Protect VMA from concurrent page fault handler while performing VMA
> > protection policy changes.
> >
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> >  mm/mempolicy.c | 6 +++++-
> >  1 file changed, 5 insertions(+), 1 deletion(-)
> >
> > diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> > index b73d3248d976..6be1e5c75556 100644
> > --- a/mm/mempolicy.c
> > +++ b/mm/mempolicy.c
> > @@ -383,8 +383,10 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
> >       struct vm_area_struct *vma;
> >
> >       mmap_write_lock(mm);
> > -     for (vma = mm->mmap; vma; vma = vma->vm_next)
> > +     for (vma = mm->mmap; vma; vma = vma->vm_next) {
> > +             vma_mark_locked(vma);
> >               mpol_rebind_policy(vma->vm_policy, new);
> > +     }
> >       mmap_write_unlock(mm);
> >  }
> >
> > @@ -632,6 +634,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
> >       struct mmu_gather tlb;
> >       int nr_updated;
> >
> > +     vma_mark_locked(vma);
>
> If I understand that correctly, the VMA itself is not impacted, only the
> PMDs/PTEs, and they are protected using the page table locks.
>
> Am I missing something?

I thought we would not want pages faulting in the VMA whose protection
we are changing. However, I think what you are saying is that the page
table locks already provide more granular synchronization with the page
fault handlers, which makes sense to me. Sounds like we can skip locking
the VMA here as well. Nice!

>
> >       tlb_gather_mmu(&tlb, vma->vm_mm);
> >
> >       nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
> > @@ -765,6 +768,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
> >       if (IS_ERR(new))
> >               return PTR_ERR(new);
> >
> > +     vma_mark_locked(vma);
> >       if (vma->vm_ops && vma->vm_ops->set_policy) {
> >               err = vma->vm_ops->set_policy(vma, new);
> >               if (err)
>
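Laurent's point, which Suren accepts above, is that change_protection()
already serializes with page faults at page-table granularity: both the
protection changer and the fault handler take the PTE lock before
touching an entry. A condensed sketch of that pattern, loosely modeled
on change_pte_range() in mm/mprotect.c (the real function also handles
huge pages, migration entries, NUMA hinting details, and TLB flushing):

static void change_pte_range_sketch(struct vm_area_struct *vma, pmd_t *pmd,
				    unsigned long addr, unsigned long end,
				    pgprot_t newprot)
{
	spinlock_t *ptl;
	pte_t *pte;

	/*
	 * This PTE lock is the same lock the page fault handler takes
	 * before installing an entry, so the PTE updates need no
	 * VMA-level exclusion.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		if (pte_present(*pte)) {
			pte_t oldpte, ptent;

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}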

Patch

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b73d3248d976..6be1e5c75556 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -383,8 +383,10 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 	struct vm_area_struct *vma;
 
 	mmap_write_lock(mm);
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		vma_mark_locked(vma);
 		mpol_rebind_policy(vma->vm_policy, new);
+	}
 	mmap_write_unlock(mm);
 }
 
@@ -632,6 +634,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 	struct mmu_gather tlb;
 	int nr_updated;
 
+	vma_mark_locked(vma);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 
 	nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
@@ -765,6 +768,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 
+	vma_mark_locked(vma);
 	if (vma->vm_ops && vma->vm_ops->set_policy) {
 		err = vma->vm_ops->set_policy(vma, new);
 		if (err)
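By contrast, the vma_replace_policy() hunk is presumably still needed:
once faults can run without mmap_lock, a concurrent fault may look up
vma->vm_policy (via get_vma_policy()) while the policy is being swapped
and freed. A condensed sketch of the default (non-set_policy) path,
simplified from vma_replace_policy() in mm/mempolicy.c, with the error
handling and set_policy branch omitted:

static int vma_replace_policy_sketch(struct vm_area_struct *vma,
				     struct mempolicy *new)
{
	struct mempolicy *old;

	/*
	 * Force concurrent lockless faults onto the mmap_lock path
	 * before swapping the policy; otherwise a fault could pick up
	 * 'old' just as the reference below is dropped.
	 */
	vma_mark_locked(vma);

	old = vma->vm_policy;
	vma->vm_policy = new;	/* caller holds mmap_lock for writing */
	mpol_put(old);		/* may free the old policy */
	return 0;
}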