
[RFC,RESEND,16/28] kernel/fork: assert no VMA readers during its destruction

Message ID 20220901173516.702122-17-surenb@google.com (mailing list archive)
State New
Series per-VMA locks proposal

Commit Message

Suren Baghdasaryan Sept. 1, 2022, 5:35 p.m. UTC
Assert that there are no holders of the VMA lock for reading when the VMA
is about to be destroyed.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/mm.h | 8 ++++++++
 kernel/fork.c      | 2 ++
 2 files changed, 10 insertions(+)

Comments

Laurent Dufour Sept. 9, 2022, 1:56 p.m. UTC | #1
On 01/09/2022 at 19:35, Suren Baghdasaryan wrote:
> Assert that there are no holders of the VMA lock for reading when the VMA
> is about to be destroyed.
> 
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  include/linux/mm.h | 8 ++++++++
>  kernel/fork.c      | 2 ++
>  2 files changed, 10 insertions(+)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index dc72be923e5b..0d9c1563c354 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -676,6 +676,13 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma, int pos)
>  	VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
>  }
>  
> +static inline void vma_assert_no_reader(struct vm_area_struct *vma)
> +{
> +	VM_BUG_ON_VMA(rwsem_is_locked(&vma->lock) &&
> +		      vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq),
> +		      vma);
> +}
> +
>  #else /* CONFIG_PER_VMA_LOCK */
>  
>  static inline void vma_init_lock(struct vm_area_struct *vma) {}
> @@ -685,6 +692,7 @@ static inline bool vma_read_trylock(struct vm_area_struct *vma)
>  static inline void vma_read_unlock(struct vm_area_struct *vma) {}
>  static inline void vma_assert_locked(struct vm_area_struct *vma) {}
>  static inline void vma_assert_write_locked(struct vm_area_struct *vma, int pos) {}
> +static inline void vma_assert_no_reader(struct vm_area_struct *vma) {}
>  
>  #endif /* CONFIG_PER_VMA_LOCK */
>  
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 1872ad549fed..b443ba3a247a 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -487,6 +487,8 @@ static void __vm_area_free(struct rcu_head *head)
>  {
>  	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
>  						  vm_rcu);
> +	/* The vma should either have no lock holders or be write-locked. */
> +	vma_assert_no_reader(vma);

I'm wondering if this can be hit in the case where the thread freeing a VMA is
preempted before incrementing the mm seq count, like this:

VMA is about to be freed
write lock VMA
free vma -> call_rcu
..
<--- thread preempted
	rcu handler runs
	rcu calls __vm_area_free() <<<<<<
unlock mmap_lock and increase the mm seq count
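
(Annotating the same steps with the sequence-count state, assuming, per the
earlier patches in this series, that write-locking a VMA sets
vma->vm_lock_seq = mm->mm_lock_seq and that mm_lock_seq is only bumped when
the mmap_lock is write-unlocked:)

VMA is about to be freed
write lock VMA					/* vm_lock_seq = mm_lock_seq */
free vma -> call_rcu
..
<--- thread preempted				/* counts still equal */
	rcu handler runs
	rcu calls __vm_area_free() <<<<<<	/* vm_lock_seq == mm_lock_seq */
unlock mmap_lock and increase the mm seq count	/* only now do they differ */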


>  	kmem_cache_free(vm_area_cachep, vma);
>  }
>  #endif
Suren Baghdasaryan Sept. 9, 2022, 4:19 p.m. UTC | #2
On Fri, Sep 9, 2022 at 6:56 AM Laurent Dufour <ldufour@linux.ibm.com> wrote:
>
> On 01/09/2022 at 19:35, Suren Baghdasaryan wrote:
> > Assert that there are no holders of the VMA lock for reading when the VMA
> > is about to be destroyed.
> >
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> >  include/linux/mm.h | 8 ++++++++
> >  kernel/fork.c      | 2 ++
> >  2 files changed, 10 insertions(+)
> >
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index dc72be923e5b..0d9c1563c354 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -676,6 +676,13 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma, int pos)
> >       VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
> >  }
> >
> > +static inline void vma_assert_no_reader(struct vm_area_struct *vma)
> > +{
> > +     VM_BUG_ON_VMA(rwsem_is_locked(&vma->lock) &&
> > +                   vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq),
> > +                   vma);
> > +}
> > +
> >  #else /* CONFIG_PER_VMA_LOCK */
> >
> >  static inline void vma_init_lock(struct vm_area_struct *vma) {}
> > @@ -685,6 +692,7 @@ static inline bool vma_read_trylock(struct vm_area_struct *vma)
> >  static inline void vma_read_unlock(struct vm_area_struct *vma) {}
> >  static inline void vma_assert_locked(struct vm_area_struct *vma) {}
> >  static inline void vma_assert_write_locked(struct vm_area_struct *vma, int pos) {}
> > +static inline void vma_assert_no_reader(struct vm_area_struct *vma) {}
> >
> >  #endif /* CONFIG_PER_VMA_LOCK */
> >
> > diff --git a/kernel/fork.c b/kernel/fork.c
> > index 1872ad549fed..b443ba3a247a 100644
> > --- a/kernel/fork.c
> > +++ b/kernel/fork.c
> > @@ -487,6 +487,8 @@ static void __vm_area_free(struct rcu_head *head)
> >  {
> >       struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
> >                                                 vm_rcu);
> > +     /* The vma should either have no lock holders or be write-locked. */
> > +     vma_assert_no_reader(vma);
>
> I'm wondering if this can be hit in the case where the thread freeing a VMA is
> preempted before incrementing the mm seq count, like this:
>
> VMA is about to be freed
> write lock VMA
> free vma -> call_rcu
> ..
> <--- thread preempted
>         rcu handler runs
>         rcu calls __vm_area_free() <<<<<<

At this point the VMA is still write-locked (the mm seq count hasn't been
incremented yet), correct? If so, vma_assert_no_reader() will not assert,
because its second condition (vm_lock_seq != mm_lock_seq) is false while
the VMA is write-locked. Did I miss anything?
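
To make that concrete, here is a minimal userspace model of the check
(mm_model/vma_model are hypothetical stand-ins, not the kernel types; only
the condition itself mirrors vma_assert_no_reader()):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the mm_struct / vm_area_struct locking state. */
struct mm_model  { int mm_lock_seq; };
struct vma_model {
	bool rwsem_locked;	/* models rwsem_is_locked(&vma->lock) */
	int vm_lock_seq;
	struct mm_model *mm;
};

/* Mirrors the condition inside vma_assert_no_reader(). */
static bool assert_would_fire(const struct vma_model *vma)
{
	return vma->rwsem_locked &&
	       vma->vm_lock_seq != vma->mm->mm_lock_seq;
}

int main(void)
{
	struct mm_model mm = { .mm_lock_seq = 5 };

	/*
	 * The scenario above: the VMA was write-locked, so the sequence
	 * counts match, and mmap_write_unlock() has not bumped mm_lock_seq
	 * yet when the RCU callback runs. The assert stays quiet
	 * regardless of the rwsem state.
	 */
	struct vma_model vma = { .rwsem_locked = true, .vm_lock_seq = 5, .mm = &mm };
	printf("preempted-free scenario fires: %d\n", assert_would_fire(&vma)); /* 0 */

	/*
	 * What the assert is actually for: a reader still holding
	 * vma->lock on a VMA that is not write-locked.
	 */
	vma.vm_lock_seq = 4;
	printf("leftover reader fires: %d\n", assert_would_fire(&vma)); /* 1 */
	return 0;
}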

> unlock mmap_lock and increase the mm seq count
>
>
> >       kmem_cache_free(vm_area_cachep, vma);
> >  }
> >  #endif
>

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index dc72be923e5b..0d9c1563c354 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -676,6 +676,13 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma, int pos)
 	VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
 }
 
+static inline void vma_assert_no_reader(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(rwsem_is_locked(&vma->lock) &&
+		      vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq),
+		      vma);
+}
+
 #else /* CONFIG_PER_VMA_LOCK */
 
 static inline void vma_init_lock(struct vm_area_struct *vma) {}
@@ -685,6 +692,7 @@ static inline bool vma_read_trylock(struct vm_area_struct *vma)
 static inline void vma_read_unlock(struct vm_area_struct *vma) {}
 static inline void vma_assert_locked(struct vm_area_struct *vma) {}
 static inline void vma_assert_write_locked(struct vm_area_struct *vma, int pos) {}
+static inline void vma_assert_no_reader(struct vm_area_struct *vma) {}
 
 #endif /* CONFIG_PER_VMA_LOCK */
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 1872ad549fed..b443ba3a247a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -487,6 +487,8 @@ static void __vm_area_free(struct rcu_head *head)
 {
 	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
 						  vm_rcu);
+	/* The vma should either have no lock holders or be write-locked. */
+	vma_assert_no_reader(vma);
 	kmem_cache_free(vm_area_cachep, vma);
 }
 #endif