[v4,09/10] mmap locking API: use lockdep_assert_held

Message ID 20200415004353.130248-10-walken@google.com
State New, archived
Series Add a new mmap locking API wrapping mmap_sem calls

Commit Message

Michel Lespinasse April 15, 2020, 12:43 a.m. UTC
Use lockdep_assert_held when asserting that mmap_sem is held.

Using this instead of rwsem_is_locked makes the assertions more
tolerant of future changes to the lock type.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 fs/userfaultfd.c | 6 +++---
 mm/gup.c         | 2 +-
 mm/memory.c      | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
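
For reference, when CONFIG_LOCKDEP is enabled, lockdep_assert_held()
expands to roughly the following (paraphrased; the exact form varies
across kernel versions):

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

With CONFIG_LOCKDEP disabled it compiles away entirely, so the
assertion costs nothing in production builds; it also checks nothing
there, which is the trade-off raised in the comments below.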

Comments

Matthew Wilcox April 21, 2020, 2:35 a.m. UTC | #1
On Tue, Apr 14, 2020 at 05:43:52PM -0700, Michel Lespinasse wrote:
> Use lockdep_assert_held when asserting that mmap_sem is held.
> 
> Using this instead of rwsem_is_locked makes the assertions more
> tolerant of future changes to the lock type.

Somebody pointed out on an earlier iteration of this patch set that
rwsem_is_locked() works on all rwsems, whereas lockdep_assert_held()
only checks anything when lockdep is enabled, which is typically not
the case in production setups.

How about this?

static inline void mm_assert_locked(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_LOCKDEP) && debug_locks)
		VM_BUG_ON_MM(!lockdep_is_held(&mm->mmap_sem), mm);
	else
		VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}
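
Note that mm_assert_locked() is the name Matthew proposes here, not an
existing kernel helper. If something along these lines were adopted,
the open-coded assertions in the patch below would become one-liners;
a hypothetical call site (the function name is made up for
illustration) might look like:

static void example_mmap_sem_user(struct mm_struct *mm)
{
	/* lockdep check when active, plain rwsem check otherwise */
	mm_assert_locked(mm);

	/* ... work that requires mmap_sem to be held ... */
}

The lockdep branch is also a stronger check: lockdep_is_held()
verifies that the current task holds the lock, whereas
rwsem_is_locked() only says that somebody does.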

Patch

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 9c645eee1a59..81ae8315f1e6 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@  static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *ptep, pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	lockdep_assert_held(&mm->mmap_sem);
 
 	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 
@@ -286,7 +286,7 @@  static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	lockdep_assert_held(&mm->mmap_sem);
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
@@ -405,7 +405,7 @@  vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * Coredumping runs without mmap_sem so we can only check that
 	 * the mmap_sem is held, if PF_DUMPCORE was not set.
 	 */
-	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+	lockdep_assert_held(&mm->mmap_sem);
 
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
diff --git a/mm/gup.c b/mm/gup.c
index 0404e52513b2..dd8f045b047b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1403,7 +1403,7 @@  long populate_vma_page_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end   & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	lockdep_assert_held(&mm->mmap_sem);
 
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
diff --git a/mm/memory.c b/mm/memory.c
index e6dd3309c5a3..66baf5142e6b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1214,7 +1214,7 @@  static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
 			if (next - addr != HPAGE_PUD_SIZE) {
-				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+				lockdep_assert_held(&tlb->mm->mmap_sem);
 				split_huge_pud(vma, pud, addr);
 			} else if (zap_huge_pud(tlb, vma, pud, addr))
 				goto next;