
[RFC,RESEND,14/28] mm: mark VMAs as locked before isolating them

Message ID: 20220901173516.702122-15-surenb@google.com (mailing list archive)
State: New
Series: per-VMA locks proposal

Commit Message

Suren Baghdasaryan Sept. 1, 2022, 5:35 p.m. UTC
Mark VMAs as locked before isolating them and clear their tree node so
that isolated VMAs are easily identifiable. In later patches, page fault
handlers will try to lock the found VMA and check whether it was
isolated. Locking VMAs before isolating them ensures that page fault
handlers don't operate on isolated VMAs.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 mm/mmap.c  | 2 ++
 mm/nommu.c | 2 ++
 2 files changed, 4 insertions(+)
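For illustration, here is a rough sketch (not taken from this series) of the
read side this enables: a page fault handler first tries to take the VMA lock
and then treats an empty rb node as "this VMA was isolated". The helper names
vma_read_trylock()/vma_read_unlock() and the lookup shown are assumptions;
RB_EMPTY_NODE() is the counterpart of the RB_CLEAR_NODE() added by this patch.

/* Hypothetical fault-path helper; names are assumptions, not from this patch. */
static struct vm_area_struct *lock_vma_for_fault(struct mm_struct *mm,
						 unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, addr);	/* the series does this lookup without mmap_lock */
	if (!vma || vma->vm_start > addr)
		return NULL;

	/* Fails if a writer has already marked the VMA locked. */
	if (!vma_read_trylock(vma))
		return NULL;

	/* __vma_rb_erase() cleared the node, so the VMA has been isolated. */
	if (RB_EMPTY_NODE(&vma->vm_rb)) {
		vma_read_unlock(vma);
		return NULL;
	}

	return vma;	/* safe to handle the fault against this VMA */
}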

Comments

Laurent Dufour Sept. 9, 2022, 1:35 p.m. UTC | #1
On 01/09/2022 at 19:35, Suren Baghdasaryan wrote:
> Mark VMAs as locked before isolating them and clear their tree node so
> that isolated VMAs are easily identifiable. In later patches, page fault
> handlers will try to lock the found VMA and check whether it was
> isolated. Locking VMAs before isolating them ensures that page fault
> handlers don't operate on isolated VMAs.

Found another place where the VMA should probably be marked locked:
*** drivers/gpu/drm/drm_vma_manager.c:
drm_vma_node_revoke[338]       rb_erase(&entry->vm_rb, &node->vm_files);

There are two other entries in nommu.c, but I guess this is not
supported, is it?


> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  mm/mmap.c  | 2 ++
>  mm/nommu.c | 2 ++
>  2 files changed, 4 insertions(+)
> 
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 094678b4434b..b0d78bdc0de0 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -421,12 +421,14 @@ static inline void vma_rb_insert(struct vm_area_struct *vma,
>  
>  static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
>  {
> +	vma_mark_locked(vma);
>  	/*
>  	 * Note rb_erase_augmented is a fairly large inline function,
>  	 * so make sure we instantiate it only once with our desired
>  	 * augmented rbtree callbacks.
>  	 */
>  	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
> +	RB_CLEAR_NODE(&vma->vm_rb);
>  }
>  
>  static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
> diff --git a/mm/nommu.c b/mm/nommu.c
> index e819cbc21b39..ff9933e57501 100644
> --- a/mm/nommu.c
> +++ b/mm/nommu.c
> @@ -622,6 +622,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
>  	struct mm_struct *mm = vma->vm_mm;
>  	struct task_struct *curr = current;
>  
> +	vma_mark_locked(vma);
>  	mm->map_count--;
>  	for (i = 0; i < VMACACHE_SIZE; i++) {
>  		/* if the vma is cached, invalidate the entire cache */
> @@ -644,6 +645,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
>  
>  	/* remove from the MM's tree and list */
>  	rb_erase(&vma->vm_rb, &mm->mm_rb);
> +	RB_CLEAR_NODE(&vma->vm_rb);
>  
>  	__vma_unlink_list(mm, vma);
>  }
Suren Baghdasaryan Sept. 9, 2022, 4:28 p.m. UTC | #2
On Fri, Sep 9, 2022 at 6:35 AM Laurent Dufour <ldufour@linux.ibm.com> wrote:
>
> On 01/09/2022 at 19:35, Suren Baghdasaryan wrote:
> > Mark VMAs as locked before isolating them and clear their tree node so
> > that isolated VMAs are easily identifiable. In later patches, page fault
> > handlers will try to lock the found VMA and check whether it was
> > isolated. Locking VMAs before isolating them ensures that page fault
> > handlers don't operate on isolated VMAs.
>
> Found another place where the VMA should probably be marked locked:
> *** drivers/gpu/drm/drm_vma_manager.c:
> drm_vma_node_revoke[338]       rb_erase(&entry->vm_rb, &node->vm_files);

Thanks! I'll add the necessary locking.

>
> There are two other entries in nommu.c, but I guess this is not
> supported, is it?

Yes, the PER_VMA_LOCK config depends on MMU, but for completeness we could
add locking there as well (it will be compiled out).
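
For reference, the usual config-gated pattern would let those calls compile
away; a minimal sketch (the exact declaration in the series may differ):

#ifdef CONFIG_PER_VMA_LOCK
void vma_mark_locked(struct vm_area_struct *vma);
#else
/* CONFIG_PER_VMA_LOCK depends on MMU, so on nommu this stub compiles to nothing. */
static inline void vma_mark_locked(struct vm_area_struct *vma) {}
#endif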

>
>
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> >  mm/mmap.c  | 2 ++
> >  mm/nommu.c | 2 ++
> >  2 files changed, 4 insertions(+)
> >
> > diff --git a/mm/mmap.c b/mm/mmap.c
> > index 094678b4434b..b0d78bdc0de0 100644
> > --- a/mm/mmap.c
> > +++ b/mm/mmap.c
> > @@ -421,12 +421,14 @@ static inline void vma_rb_insert(struct vm_area_struct *vma,
> >
> >  static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
> >  {
> > +     vma_mark_locked(vma);
> >       /*
> >        * Note rb_erase_augmented is a fairly large inline function,
> >        * so make sure we instantiate it only once with our desired
> >        * augmented rbtree callbacks.
> >        */
> >       rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
> > +     RB_CLEAR_NODE(&vma->vm_rb);
> >  }
> >
> >  static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
> > diff --git a/mm/nommu.c b/mm/nommu.c
> > index e819cbc21b39..ff9933e57501 100644
> > --- a/mm/nommu.c
> > +++ b/mm/nommu.c
> > @@ -622,6 +622,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
> >       struct mm_struct *mm = vma->vm_mm;
> >       struct task_struct *curr = current;
> >
> > +     vma_mark_locked(vma);
> >       mm->map_count--;
> >       for (i = 0; i < VMACACHE_SIZE; i++) {
> >               /* if the vma is cached, invalidate the entire cache */
> > @@ -644,6 +645,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
> >
> >       /* remove from the MM's tree and list */
> >       rb_erase(&vma->vm_rb, &mm->mm_rb);
> > +     RB_CLEAR_NODE(&vma->vm_rb);
> >
> >       __vma_unlink_list(mm, vma);
> >  }
>

Patch

diff --git a/mm/mmap.c b/mm/mmap.c
index 094678b4434b..b0d78bdc0de0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -421,12 +421,14 @@  static inline void vma_rb_insert(struct vm_area_struct *vma,
 
 static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
 {
+	vma_mark_locked(vma);
 	/*
 	 * Note rb_erase_augmented is a fairly large inline function,
 	 * so make sure we instantiate it only once with our desired
 	 * augmented rbtree callbacks.
 	 */
 	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
+	RB_CLEAR_NODE(&vma->vm_rb);
 }
 
 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
diff --git a/mm/nommu.c b/mm/nommu.c
index e819cbc21b39..ff9933e57501 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -622,6 +622,7 @@  static void delete_vma_from_mm(struct vm_area_struct *vma)
 	struct mm_struct *mm = vma->vm_mm;
 	struct task_struct *curr = current;
 
+	vma_mark_locked(vma);
 	mm->map_count--;
 	for (i = 0; i < VMACACHE_SIZE; i++) {
 		/* if the vma is cached, invalidate the entire cache */
@@ -644,6 +645,7 @@  static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	/* remove from the MM's tree and list */
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
+	RB_CLEAR_NODE(&vma->vm_rb);
 
 	__vma_unlink_list(mm, vma);
 }