
[v3,06/10] mmap locking API: convert nested write lock sites

Message ID 20200327225102.25061-7-walken@google.com
State New, archived
Series Add a new mmap locking API wrapping mmap_sem calls

Commit Message

Michel Lespinasse March 27, 2020, 10:50 p.m. UTC
Add an API for nested write locks, and convert the few call sites that take the mmap lock with nesting.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 arch/um/include/asm/mmu_context.h |  5 +++--
 include/linux/mmap_lock.h         | 11 +++++++++++
 kernel/fork.c                     |  4 ++--
 3 files changed, 16 insertions(+), 4 deletions(-)
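
For orientation, here is a minimal sketch of the calling pattern this
patch converts. The lock/unlock names and the SINGLE_DEPTH_NESTING
subclass are exactly those used in the patch below; the example_dup()
wrapper itself is illustrative only, loosely mirroring dup_mmap():

#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */
#include <linux/mm_types.h>	/* struct mm_struct */
#include <linux/mmap_lock.h>	/* the API added by this patch */

/*
 * Sketch: take the nested write lock on a second mm while already
 * holding the first one for writing.
 */
static void example_dup(struct mm_struct *oldmm, struct mm_struct *mm)
{
	mmap_write_lock(oldmm);
	/* mm is not linked in yet, so nesting here cannot deadlock. */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* ... copy state from oldmm into mm ... */

	mmap_write_unlock_nested(mm);
	mmap_write_unlock(oldmm);
}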

Comments

Peter Zijlstra April 1, 2020, 1:42 p.m. UTC | #1
On Fri, Mar 27, 2020 at 03:50:58PM -0700, Michel Lespinasse wrote:

> @@ -26,6 +31,12 @@ static inline void mmap_write_unlock(struct mm_struct *mm)
>  	up_write(&mm->mmap_sem);
>  }
>  
> +/* Pairs with mmap_write_lock_nested() */
> +static inline void mmap_write_unlock_nested(struct mm_struct *mm)
> +{
> +	up_write(&mm->mmap_sem);
> +}
> +
>  static inline void mmap_downgrade_write_lock(struct mm_struct *mm)
>  {
>  	downgrade_write(&mm->mmap_sem);

Why does unlock_nested() make sense ?
Michel Lespinasse April 6, 2020, 3:35 p.m. UTC | #2
On Wed, Apr 1, 2020 at 6:42 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Mar 27, 2020 at 03:50:58PM -0700, Michel Lespinasse wrote:
>
> > @@ -26,6 +31,12 @@ static inline void mmap_write_unlock(struct mm_struct *mm)
> >       up_write(&mm->mmap_sem);
> >  }
> >
> > +/* Pairs with mmap_write_lock_nested() */
> > +static inline void mmap_write_unlock_nested(struct mm_struct *mm)
> > +{
> > +     up_write(&mm->mmap_sem);
> > +}
> > +
> >  static inline void mmap_downgrade_write_lock(struct mm_struct *mm)
> >  {
> >       downgrade_write(&mm->mmap_sem);
>
> Why does unlock_nested() make sense ?

I thought it would make things more explicit to match the nested lock
with the corresponding unlock site; however, this information is not
used at the moment (i.e. the nested unlock is implemented identically
to the regular unlock).

Having the matching sites explicitly identified may help when
implementing lock instrumentation, or when changing the lock type
(another patchset I am working on needs to pass an explicit lock range
to the nested lock and unlock sites).

I'll admit this is not a super strong argument, and can be deferred to
when an actual need shows up in the future.
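
To sketch what such a range-carrying variant could look like: the
struct mm_lock_range type and the *_range_nested() names below are
purely hypothetical, invented here for illustration and taken from no
posted patch. The point is only that a matched unlock site would need
the same range back, which is what makes pairing the sites explicit:

/* Hypothetical: nested lock/unlock sites carrying an explicit range. */
struct mm_lock_range {
	unsigned long start, end;
};

static inline void mmap_write_lock_range_nested(struct mm_struct *mm,
		struct mm_lock_range *range, int subclass)
{
	/* A range-lock backend would record 'range' here. */
	down_write_nested(&mm->mmap_sem, subclass);
}

static inline void mmap_write_unlock_range_nested(struct mm_struct *mm,
		struct mm_lock_range *range)
{
	/* ... and would need the same 'range' back to release it. */
	up_write(&mm->mmap_sem);
}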
Peter Zijlstra April 6, 2020, 3:58 p.m. UTC | #3
On Mon, Apr 06, 2020 at 08:35:03AM -0700, Michel Lespinasse wrote:
> On Wed, Apr 1, 2020 at 6:42 AM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > On Fri, Mar 27, 2020 at 03:50:58PM -0700, Michel Lespinasse wrote:
> >
> > > @@ -26,6 +31,12 @@ static inline void mmap_write_unlock(struct mm_struct *mm)
> > >       up_write(&mm->mmap_sem);
> > >  }
> > >
> > > +/* Pairs with mmap_write_lock_nested() */
> > > +static inline void mmap_write_unlock_nested(struct mm_struct *mm)
> > > +{
> > > +     up_write(&mm->mmap_sem);
> > > +}
> > > +
> > >  static inline void mmap_downgrade_write_lock(struct mm_struct *mm)
> > >  {
> > >       downgrade_write(&mm->mmap_sem);
> >
> > Why does unlock_nested() make sense ?
> 
> I thought it would make things more explicit to match the nested lock
> with the corresponding unlock site; however this information is not
> used at the moment (i.e. the nested unlock is implemented identically
> to the regular unlock).
> 
> Having the matching sites explicitly identified may help when
> implementing lock instrumentation, or when changing the lock type
> (another patchset I am working on needs to pass an explicit lock range
> to the nested lock and unlock sites).
> 
> I'll admit this is not a super strong argument, and can be deferred to
> when an actual need shows up in the future.

The thing is, lock-acquisition order matters _a_lot_; lock-release order
is irrelevant. ISTR there was a thread about this some 14 years ago, but
please don't ask me to go find it :/
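
Concretely, lockdep classifies a lock when it is acquired, so the
subclass annotation is only meaningful on the second down_write(); the
releases are plain up_write() calls either way. A minimal sketch of
the pattern (the wrapper function is illustrative only):

/*
 * Sketch: without the subclass on the second acquisition, lockdep
 * would report a false deadlock for taking two locks of the same
 * class. Releases carry no ordering information.
 */
static void nesting_example(struct mm_struct *oldmm, struct mm_struct *mm)
{
	down_write(&oldmm->mmap_sem);
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* ... */

	up_write(&mm->mmap_sem);	/* release order is irrelevant */
	up_write(&oldmm->mmap_sem);
}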

Patch

diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 62262c5c7785..cc15173f7518 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -8,6 +8,7 @@ 
 
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
 
 #include <asm/mmu.h>
 
@@ -47,9 +48,9 @@  static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write_nested(&new->mmap_sem, 1);
+	mmap_write_lock_nested(new, 1);
 	uml_setup_stubs(new);
-	mmap_write_unlock(new);
+	mmap_write_unlock_nested(new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 8b5a3cd56118..36fb758401d6 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -11,6 +11,11 @@  static inline void mmap_write_lock(struct mm_struct *mm)
 	down_write(&mm->mmap_sem);
 }
 
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+	down_write_nested(&mm->mmap_sem, subclass);
+}
+
 static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
 	return down_write_killable(&mm->mmap_sem);
@@ -26,6 +31,12 @@  static inline void mmap_write_unlock(struct mm_struct *mm)
 	up_write(&mm->mmap_sem);
 }
 
+/* Pairs with mmap_write_lock_nested() */
+static inline void mmap_write_unlock_nested(struct mm_struct *mm)
+{
+	up_write(&mm->mmap_sem);
+}
+
 static inline void mmap_downgrade_write_lock(struct mm_struct *mm)
 {
 	downgrade_write(&mm->mmap_sem);
diff --git a/kernel/fork.c b/kernel/fork.c
index c321910d46e8..3460308b2213 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -497,7 +497,7 @@  static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/*
 	 * Not linked in yet - no deadlock potential:
 	 */
-	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
 
 	/* No ordering required: file already has been exposed. */
 	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
@@ -612,7 +612,7 @@  static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/* a new mm has just been created */
 	retval = arch_dup_mmap(oldmm, mm);
 out:
-	mmap_write_unlock(mm);
+	mmap_write_unlock_nested(mm);
 	flush_tlb_mm(oldmm);
 	mmap_write_unlock(oldmm);
 	dup_userfaultfd_complete(&uf);