[v2,09/10] mmap locking API: use lockdep_assert_held

Message ID: 20200327021058.221911-10-walken@google.com (mailing list archive)
State: New, archived
Series: Add a new mmap locking API wrapping mmap_sem calls

Commit Message

Michel Lespinasse March 27, 2020, 2:10 a.m. UTC
Use lockdep_assert_held when asserting that mmap_sem is held.

Using this instead of rwsem_is_locked makes the assertions more
tolerant of future changes to the lock type.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 fs/userfaultfd.c | 6 +++---
 mm/gup.c         | 2 +-
 mm/memory.c      | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
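
For illustration, a minimal sketch of the assertion style the patch converts
to. The helper frob_vma() below is hypothetical and not part of the patch;
only lockdep_assert_held() and the mmap_sem field come from the kernel:

#include <linux/lockdep.h>
#include <linux/mm_types.h>

/* Hypothetical helper: a function that requires the caller to hold
 * mmap_sem can document and enforce that with lockdep_assert_held(). */
static void frob_vma(struct mm_struct *mm)
{
	/* Warns if lockdep does not see mmap_sem held (in either read
	 * or write mode) by the current task. Unlike rwsem_is_locked(),
	 * this does not hard-code the assumption that mmap_sem is an
	 * rwsem, so it survives a future change of the lock type. */
	lockdep_assert_held(&mm->mmap_sem);

	/* ... walk or modify the VMA tree knowing it is stable ... */
}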

Comments

Davidlohr Bueso March 27, 2020, 4:48 a.m. UTC | #1
On Thu, 26 Mar 2020, Michel Lespinasse wrote:

>Use lockdep_assert_held when asserting that mmap_sem is held.
>
>Using this instead of rwsem_is_locked makes the assertions more
>tolerant of future changes to the lock type.

Not opposing here, just worth noting that the assertion coverage is
greatly reduced, since lockdep is rarely enabled in production kernels.

Thanks,
Davidlohr
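
The coverage point: lockdep_assert_held() compiles away on kernels built
without lockdep, whereas the old rwsem_is_locked() checks needed only
CONFIG_DEBUG_VM (for the VM_BUG_ON variants) or nothing at all (for the
WARN_ON_ONCE in handle_userfault). Roughly how the macro is defined,
paraphrased from <linux/lockdep.h> of that era (exact form may differ
by tree):

#ifdef CONFIG_LOCKDEP
#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)
#else
/* Without lockdep the assertion only evaluates its argument, so a
 * typical production kernel gets no checking at all. */
#define lockdep_assert_held(l)	do { (void)(l); } while (0)
#endif
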
Patch

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 5914eabd8185..ad1ce223ee6a 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@  static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *ptep, pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	lockdep_assert_held(&mm->mmap_sem);
 
 	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
 
@@ -286,7 +286,7 @@  static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	lockdep_assert_held(&mm->mmap_sem);
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
@@ -376,7 +376,7 @@  vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * Coredumping runs without mmap_sem so we can only check that
 	 * the mmap_sem is held, if PF_DUMPCORE was not set.
 	 */
-	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+	lockdep_assert_held(&mm->mmap_sem);
 
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
diff --git a/mm/gup.c b/mm/gup.c
index d78965738e7e..1e225eba4787 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1154,7 +1154,7 @@  long populate_vma_page_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end   & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	lockdep_assert_held(&mm->mmap_sem);
 
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
diff --git a/mm/memory.c b/mm/memory.c
index 03fce44eee16..4c125f0a1df9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1202,7 +1202,7 @@  static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
 			if (next - addr != HPAGE_PUD_SIZE) {
-				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+				lockdep_assert_held(&tlb->mm->mmap_sem);
 				split_huge_pud(vma, pud, addr);
 			} else if (zap_huge_pud(tlb, vma, pud, addr))
 				goto next;