
[v5,06/10] mmap locking API: convert nested write lock sites

Message ID 20200422001422.232330-7-walken@google.com (mailing list archive)
State New, archived
Series Add a new mmap locking API wrapping mmap_sem calls

Commit Message

Michel Lespinasse April 22, 2020, 12:14 a.m. UTC
Add API for nested write locks and convert the few call sites doing that.

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
---
 arch/um/include/asm/mmu_context.h | 3 ++-
 include/linux/mmap_lock.h         | 5 +++++
 kernel/fork.c                     | 2 +-
 3 files changed, 8 insertions(+), 2 deletions(-)
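For context, here is a minimal sketch (not part of the patch) of the caller pattern the nested variant is for. It loosely mirrors dup_mmap() in kernel/fork.c; the helper name and the omitted error handling are illustrative only.

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/*
 * Sketch: copy state from oldmm into a new, not-yet-visible mm.
 * Both mmap locks are held at once, so lockdep sees two locks of the
 * same class; the SINGLE_DEPTH_NESTING subclass on the inner lock tells
 * it the nesting is intentional rather than a self-deadlock.
 */
static void copy_mm_sketch(struct mm_struct *oldmm, struct mm_struct *mm)
{
	mmap_write_lock(oldmm);					/* outer lock, subclass 0 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);	/* inner lock */

	/* ... duplicate VMAs from oldmm into mm ... */

	mmap_write_unlock(mm);
	mmap_write_unlock(oldmm);
}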

Comments

Vlastimil Babka May 18, 2020, 10:32 a.m. UTC | #1
On 4/22/20 2:14 AM, Michel Lespinasse wrote:
> Add API for nested write locks and convert the few call sites doing that.
> 
> Signed-off-by: Michel Lespinasse <walken@google.com>
> Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Perhaps we could even move SINGLE_DEPTH_NESTING into the wrapper? It's unlikely
there will be a new user with a different subclass?
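(For illustration, a hypothetical folded-in variant along those lines, assuming the subclass is hidden inside the helper; this is not what the patch adds, and the one-argument signature is made up.)

static inline void mmap_write_lock_nested(struct mm_struct *mm)
{
	/* Hypothetical: always pass SINGLE_DEPTH_NESTING so callers need not know about lockdep subclasses. */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
}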

> ---
>  arch/um/include/asm/mmu_context.h | 3 ++-
>  include/linux/mmap_lock.h         | 5 +++++
>  kernel/fork.c                     | 2 +-
>  3 files changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
> index 62262c5c7785..17ddd4edf875 100644
> --- a/arch/um/include/asm/mmu_context.h
> +++ b/arch/um/include/asm/mmu_context.h
> @@ -8,6 +8,7 @@
>  
>  #include <linux/sched.h>
>  #include <linux/mm_types.h>
> +#include <linux/mmap_lock.h>
>  
>  #include <asm/mmu.h>
>  
> @@ -47,7 +48,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
>  	 * when the new ->mm is used for the first time.
>  	 */
>  	__switch_mm(&new->context.id);
> -	down_write_nested(&new->mmap_sem, 1);
> +	mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
>  	uml_setup_stubs(new);
>  	mmap_write_unlock(new);
>  }
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 97ac53b66052..a757cb30ae77 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -11,6 +11,11 @@ static inline void mmap_write_lock(struct mm_struct *mm)
>  	down_write(&mm->mmap_sem);
>  }
>  
> +static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
> +{
> +	down_write_nested(&mm->mmap_sem, subclass);
> +}
> +
>  static inline int mmap_write_lock_killable(struct mm_struct *mm)
>  {
>  	return down_write_killable(&mm->mmap_sem);
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 41d3f45c058e..a5d1d20ccba7 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -499,7 +499,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
>  	/*
>  	 * Not linked in yet - no deadlock potential:
>  	 */
> -	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
> +	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
>  
>  	/* No ordering required: file already has been exposed. */
>  	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
>
Laurent Dufour May 18, 2020, 1:24 p.m. UTC | #2
On 22/04/2020 at 02:14, Michel Lespinasse wrote:
> Add API for nested write locks and convert the few call sites doing that.
> 
> Signed-off-by: Michel Lespinasse <walken@google.com>
> Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>

Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>

> ---
>   arch/um/include/asm/mmu_context.h | 3 ++-
>   include/linux/mmap_lock.h         | 5 +++++
>   kernel/fork.c                     | 2 +-
>   3 files changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
> index 62262c5c7785..17ddd4edf875 100644
> --- a/arch/um/include/asm/mmu_context.h
> +++ b/arch/um/include/asm/mmu_context.h
> @@ -8,6 +8,7 @@
>   
>   #include <linux/sched.h>
>   #include <linux/mm_types.h>
> +#include <linux/mmap_lock.h>
>   
>   #include <asm/mmu.h>
>   
> @@ -47,7 +48,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
>   	 * when the new ->mm is used for the first time.
>   	 */
>   	__switch_mm(&new->context.id);
> -	down_write_nested(&new->mmap_sem, 1);
> +	mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
>   	uml_setup_stubs(new);
>   	mmap_write_unlock(new);
>   }
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 97ac53b66052..a757cb30ae77 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -11,6 +11,11 @@ static inline void mmap_write_lock(struct mm_struct *mm)
>   	down_write(&mm->mmap_sem);
>   }
>   
> +static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
> +{
> +	down_write_nested(&mm->mmap_sem, subclass);
> +}
> +
>   static inline int mmap_write_lock_killable(struct mm_struct *mm)
>   {
>   	return down_write_killable(&mm->mmap_sem);
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 41d3f45c058e..a5d1d20ccba7 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -499,7 +499,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
>   	/*
>   	 * Not linked in yet - no deadlock potential:
>   	 */
> -	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
> +	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
>   
>   	/* No ordering required: file already has been exposed. */
>   	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
>
Michel Lespinasse May 19, 2020, 12:54 p.m. UTC | #3
On Mon, May 18, 2020 at 12:32:03PM +0200, Vlastimil Babka wrote:
> On 4/22/20 2:14 AM, Michel Lespinasse wrote:
> > Add API for nested write locks and convert the few call sites doing that.
> > 
> > Signed-off-by: Michel Lespinasse <walken@google.com>
> > Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
> 
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> 
> Perhaps we could even move SINGLE_DEPTH_NESTING into the wrapper? It's unlikely
> there will be a new user with a different subclass?

I think I'll leave it in the API for now. I don't foresee new uses
being added as long as we stick to coarse mmap locking, but if the
API were extended to support range locking, it'd become more likely
that we'd want to lock multiple ranges for mremap...
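(Illustrative sketch of that hypothetical range-locking case. None of the mmap_write_range_lock*() names below exist; they are made up to show why a caller-supplied subclass could still matter, e.g. mremap taking both the source and destination range of the same mm.)

	/* Hypothetical range-locking API, for the sake of argument. */
	mmap_write_range_lock(mm, old_start, old_end);		/* subclass 0 */
	mmap_write_range_lock_nested(mm, new_start, new_end,
				     SINGLE_DEPTH_NESTING);	/* second range, same mm */
	/* ... move VMAs / page tables from the old range to the new one ... */
	mmap_write_range_unlock(mm, new_start, new_end);
	mmap_write_range_unlock(mm, old_start, old_end);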

Patch

diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 62262c5c7785..17ddd4edf875 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -8,6 +8,7 @@ 
 
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
 
 #include <asm/mmu.h>
 
@@ -47,7 +48,7 @@  static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write_nested(&new->mmap_sem, 1);
+	mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
 	uml_setup_stubs(new);
 	mmap_write_unlock(new);
 }
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 97ac53b66052..a757cb30ae77 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -11,6 +11,11 @@  static inline void mmap_write_lock(struct mm_struct *mm)
 	down_write(&mm->mmap_sem);
 }
 
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+	down_write_nested(&mm->mmap_sem, subclass);
+}
+
 static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
 	return down_write_killable(&mm->mmap_sem);
diff --git a/kernel/fork.c b/kernel/fork.c
index 41d3f45c058e..a5d1d20ccba7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -499,7 +499,7 @@  static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/*
 	 * Not linked in yet - no deadlock potential:
 	 */
-	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
 
 	/* No ordering required: file already has been exposed. */
 	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));