[v5,09/10] mmap locking API: add mmap_assert_locked

Message ID 20200422001422.232330-10-walken@google.com (mailing list archive)
State New, archived
Series Add a new mmap locking API wrapping mmap_sem calls

Commit Message

Michel Lespinasse April 22, 2020, 12:14 a.m. UTC
Add mmap_assert_locked to assert that mmap_sem is held.
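
Call sites that open-code the check convert directly; for example, from
the userfaultfd hunk below:

-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	mmap_assert_locked(mm);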

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 fs/userfaultfd.c          |  6 +++---
 include/linux/mmap_lock.h | 10 ++++++++++
 mm/gup.c                  |  2 +-
 mm/memory.c               |  2 +-
 4 files changed, 15 insertions(+), 5 deletions(-)

Comments

Michel Lespinasse April 22, 2020, 2:10 a.m. UTC | #1
On Tue, Apr 21, 2020 at 5:14 PM Michel Lespinasse <walken@google.com> wrote:
> +static inline void mmap_assert_locked(struct mm_struct *mm)
> +{
> +       if (IS_ENABLED(CONFIG_LOCKDEP) && debug_locks)
> +               VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
> +       else
> +               VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
> +}

Turns out this function definition does not work for !CONFIG_LOCKDEP
as lockdep_is_held is not defined in that case.

The following should work instead:

static inline void mmap_assert_locked(struct mm_struct *mm)
{
#ifdef CONFIG_LOCKDEP
        if (debug_locks) {
                VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
                return;
        }
#endif
        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}
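
The #ifdef keeps the lockdep_is_held() reference out of !CONFIG_LOCKDEP
builds, and falling through to the plain rwsem_is_locked() check keeps
the assertion useful once debug_locks has been turned off at runtime.

Another option would be to lean on lockdep_assert_held(), which is
already stubbed out for !CONFIG_LOCKDEP (just a sketch; it trades the
VM_BUG_ON_MM() mm dump for lockdep's own WARN on the lockdep path):

static inline void mmap_assert_locked(struct mm_struct *mm)
{
        lockdep_assert_held(&mm->mmap_sem);
        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}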
Matthew Wilcox April 22, 2020, 2:18 a.m. UTC | #2
On Tue, Apr 21, 2020 at 07:10:39PM -0700, Michel Lespinasse wrote:
> On Tue, Apr 21, 2020 at 5:14 PM Michel Lespinasse <walken@google.com> wrote:
> > +static inline void mmap_assert_locked(struct mm_struct *mm)
> > +{
> > +       if (IS_ENABLED(CONFIG_LOCKDEP) && debug_locks)
> > +               VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
> > +       else
> > +               VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
> > +}
> 
> Turns out this function definition does not work for !CONFIG_LOCKDEP
> as lockdep_is_held is not defined in that case.

Oops, sorry.  In the !CONFIG_LOCKDEP case, lockdep.h only defines

#define lockdep_is_held_type(l, r)              (1)
#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)            do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)

which seems like an oversight, but not one that you should be fixing.
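
Presumably all that's missing is a one-line stub mirroring
lockdep_is_held_type(), something like:

#define lockdep_is_held(lock)                   (1)

but again, that's a separate cleanup.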

> The following should work instead:
> 
> static inline void mmap_assert_locked(struct mm_struct *mm)
> {
> #ifdef CONFIG_LOCKDEP
>         if (debug_locks) {
>                 VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
>                 return;
>         }
> #endif
>         VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
> }

Yes, I agree.
Michel Lespinasse April 24, 2020, 1:44 a.m. UTC | #3
On Tue, Apr 21, 2020 at 07:18:50PM -0700, Matthew Wilcox wrote:
> On Tue, Apr 21, 2020 at 07:10:39PM -0700, Michel Lespinasse wrote:
> > On Tue, Apr 21, 2020 at 5:14 PM Michel Lespinasse <walken@google.com> wrote:
> > > +static inline void mmap_assert_locked(struct mm_struct *mm)
> > > +{
> > > +       if (IS_ENABLED(CONFIG_LOCKDEP) && debug_locks)
> > > +               VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
> > > +       else
> > > +               VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
> > > +}
> > 
> > Turns out this function definition does not work for !CONFIG_LOCKDEP
> > as lockdep_is_held is not defined in that case.
> 
> Oops, sorry.  In the !CONFIG_LOCKDEP case, lockdep.h only defines
> 
> #define lockdep_is_held_type(l, r)              (1)
> #define lockdep_assert_held(l)                  do { (void)(l); } while (0)
> #define lockdep_assert_held_write(l)            do { (void)(l); } while (0)
> #define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
> #define lockdep_assert_held_once(l)             do { (void)(l); } while (0)
> 
> which seems like an oversight, but not one that you should be fixing.
> 
> > The following should work instead:
> > 
> > static inline void mmap_assert_locked(struct mm_struct *mm)
> > {
> > #ifdef CONFIG_LOCKDEP
> >         if (debug_locks) {
> >                 VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
> >                 return;
> >         }
> > #endif
> >         VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
> > }
> 
> Yes, I agree.

Sent an updated version of this (also integrating your feedback on
patch 10/10).

Please look for it under subject: [PATCH v5.5 09/10] mmap locking API: add mmap_assert_locked() and mmap_assert_write_locked()

Patch

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 9c645eee1a59..12b492409040 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@  static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *ptep, pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	mmap_assert_locked(mm);
 
 	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 
@@ -286,7 +286,7 @@  static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	mmap_assert_locked(mm);
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
@@ -405,7 +405,7 @@  vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * Coredumping runs without mmap_sem so we can only check that
 	 * the mmap_sem is held, if PF_DUMPCORE was not set.
 	 */
-	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+	mmap_assert_locked(mm);
 
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 9e104835a0d1..f7a3a9550cc5 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -1,6 +1,8 @@ 
 #ifndef _LINUX_MMAP_LOCK_H
 #define _LINUX_MMAP_LOCK_H
 
+#include <linux/mmdebug.h>
+
 #define MMAP_LOCK_INITIALIZER(name) \
 	.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem),
 
@@ -73,4 +75,12 @@  static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 	up_read_non_owner(&mm->mmap_sem);
 }
 
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+	if (IS_ENABLED(CONFIG_LOCKDEP) && debug_locks)
+		VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
+	else
+		VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+}
+
 #endif /* _LINUX_MMAP_LOCK_H */
diff --git a/mm/gup.c b/mm/gup.c
index 0404e52513b2..e12993ceb711 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1403,7 +1403,7 @@  long populate_vma_page_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end   & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	mmap_assert_locked(mm);
 
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
diff --git a/mm/memory.c b/mm/memory.c
index e6dd3309c5a3..20f98ea8968e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1214,7 +1214,7 @@  static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
 			if (next - addr != HPAGE_PUD_SIZE) {
-				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+				mmap_assert_locked(tlb->mm);
 				split_huge_pud(vma, pud, addr);
 			} else if (zap_huge_pud(tlb, vma, pud, addr))
 				goto next;