
[5/8] mmap locking API: convert nested write lock sites

Message ID 20200326070236.235835-6-walken@google.com (mailing list archive)
State New, archived
Series Add a new mmap locking API wrapping mmap_sem calls

Commit Message

Michel Lespinasse March 26, 2020, 7:02 a.m. UTC
Add an API for nested write locks and convert the few call sites that need it.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 arch/um/include/asm/mmu_context.h |  5 +++--
 include/linux/mmap_lock.h         | 11 +++++++++++
 kernel/fork.c                     |  4 ++--
 3 files changed, 16 insertions(+), 4 deletions(-)

Comments

Matthew Wilcox (Oracle) March 26, 2020, 12:09 p.m. UTC | #1
On Thu, Mar 26, 2020 at 12:02:33AM -0700, Michel Lespinasse wrote:
> @@ -47,9 +48,9 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
>  	 * when the new ->mm is used for the first time.
>  	 */
>  	__switch_mm(&new->context.id);
> -	down_write_nested(&new->mmap_sem, 1);
> +	mmap_write_lock_nested(new, 1);
>  	uml_setup_stubs(new);
> -	mmap_write_unlock(new);
> +	mmap_write_unlock_nested(new);

This is a bit of an oddity.  We don't usually have an unlock_nested()
variant (a quick grep finds only something complicated in reiserfs).
That's because it's legitimate to release locks in a different order from
the one they were acquired in (eg lock A, lock B, unlock A, unlock B), and
it's not clear whether "nested" would follow the lock (ie unlock_nested B)
or whether it would follow the code (ie unlock_nested A).

Does your future API require knowing the nested nature at the unlock
point?  And if so, does it require it for A or B in the above scenario?
And how does it mix with lock A or B being of a different type (eg a
plain mutex or a spinlock)?
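
For illustration, the ordering described above might look like the following kernel-style sketch (hypothetical mutexes a and b, not code from this series); the nesting annotation is given at acquire time only, so once the unlocks interleave with the acquire order there is no obvious lock or program position for an unlock_nested() to follow:

#include <linux/mutex.h>
#include <linux/lockdep.h>

/* Hypothetical example, not code from this series. */
static void ab_ordering_example(struct mutex *a, struct mutex *b)
{
	mutex_lock(a);					/* outer lock */
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);	/* nested lock */

	/* ... work under both locks ... */

	mutex_unlock(a);	/* released first, but acquired as the outer lock */
	mutex_unlock(b);	/* released last, but acquired as the nested lock */
}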
Michel Lespinasse March 26, 2020, 12:56 p.m. UTC | #2
On Thu, Mar 26, 2020 at 5:09 AM Matthew Wilcox <willy@infradead.org> wrote:
> On Thu, Mar 26, 2020 at 12:02:33AM -0700, Michel Lespinasse wrote:
> > @@ -47,9 +48,9 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
> >        * when the new ->mm is used for the first time.
> >        */
> >       __switch_mm(&new->context.id);
> > -     down_write_nested(&new->mmap_sem, 1);
> > +     mmap_write_lock_nested(new, 1);
> >       uml_setup_stubs(new);
> > -     mmap_write_unlock(new);
> > +     mmap_write_unlock_nested(new);
>
> This is a bit of an oddity.  We don't usually have an unlock_nested()
> variant (a quick grep finds only something complicated in reiserfs).
> That's because it's legitimate to release locks in a different order from
> the one they were acquired in (eg lock A, lock B, unlock A, unlock B), and
> it's not clear whether "nested" would follow the lock (ie unlock_nested B)
> or whether it would follow the code (ie unlock_nested A).
>
> Does your future API require knowing the nested nature at the unlock
> point?  And if so, does it require it for A or B in the above scenario?
> And how does it mix with lock A or B being of a different type (eg a
> plain mutex or a spinlock)?

I'll admit it's a bit unusual.

In MM we have only two uses of nested mmap locks (as you can see in this
patch), and they both release the locks in the reverse of the order they
acquired them in. We could probably keep following this pattern if
additional use cases end up being needed.

In my range locking patchset, nested locks need to pass an explicit
lock range. Also, when measuring mmap_sem locking latencies, it can
be convenient to ignore the nested locks, under the assumption that
their hold interval is contained within the outer lock's hold
interval.
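
To make the latency point concrete, here is a hypothetical sketch (the mmap_latency_start()/mmap_latency_end() hooks are made up for illustration and do not exist in the kernel) of how a separate nested unlock entry point lets instrumentation skip hold intervals that are contained within the outer lock's:

/* Hypothetical sketch only: mmap_latency_start()/mmap_latency_end()
 * are made-up accounting hooks, not existing kernel functions. */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
	mmap_latency_start(mm);		/* begin timing the outer hold interval */
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	mmap_latency_end(mm);		/* end timing the outer hold interval */
	up_write(&mm->mmap_sem);
}

static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	/* nested hold is contained in the outer one: no accounting */
	down_write_nested(&mm->mmap_sem, subclass);
}

static inline void mmap_write_unlock_nested(struct mm_struct *mm)
{
	/* pairs with mmap_write_lock_nested(): skip accounting here too */
	up_write(&mm->mmap_sem);
}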

Patch

diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 62262c5c7785..cc15173f7518 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -8,6 +8,7 @@ 
 
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
 
 #include <asm/mmu.h>
 
@@ -47,9 +48,9 @@  static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write_nested(&new->mmap_sem, 1);
+	mmap_write_lock_nested(new, 1);
 	uml_setup_stubs(new);
-	mmap_write_unlock(new);
+	mmap_write_unlock_nested(new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index cffd25afe92b..40a972a26857 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -11,6 +11,11 @@  static inline void mmap_write_lock(struct mm_struct *mm)
 	down_write(&mm->mmap_sem);
 }
 
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+	down_write_nested(&mm->mmap_sem, subclass);
+}
+
 static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
 	return down_write_killable(&mm->mmap_sem);
@@ -26,6 +31,12 @@  static inline void mmap_write_unlock(struct mm_struct *mm)
 	up_write(&mm->mmap_sem);
 }
 
+/* Pairs with mmap_write_lock_nested() */
+static inline void mmap_write_unlock_nested(struct mm_struct *mm)
+{
+	up_write(&mm->mmap_sem);
+}
+
 static inline void mmap_downgrade_write_lock(struct mm_struct *mm)
 {
 	downgrade_write(&mm->mmap_sem);
diff --git a/kernel/fork.c b/kernel/fork.c
index c321910d46e8..3460308b2213 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -497,7 +497,7 @@  static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/*
 	 * Not linked in yet - no deadlock potential:
 	 */
-	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
 
 	/* No ordering required: file already has been exposed. */
 	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
@@ -612,7 +612,7 @@  static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/* a new mm has just been created */
 	retval = arch_dup_mmap(oldmm, mm);
 out:
-	mmap_write_unlock(mm);
+	mmap_write_unlock_nested(mm);
 	flush_tlb_mm(oldmm);
 	mmap_write_unlock(oldmm);
 	dup_userfaultfd_complete(&uf);