--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
pte_t *ptep, pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ lockdep_assert_held(&mm->mmap_sem);
ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
pte_t *pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ lockdep_assert_held(&mm->mmap_sem);
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
@@ -405,7 +405,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
* Coredumping runs without mmap_sem so we can only check that
* the mmap_sem is held, if PF_DUMPCORE was not set.
*/
- WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+ lockdep_assert_held(&mm->mmap_sem);
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1403,7 +1403,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+ lockdep_assert_held(&mm->mmap_sem);
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
if (vma->vm_flags & VM_LOCKONFAULT)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1214,7 +1214,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
next = pud_addr_end(addr, end);
if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
if (next - addr != HPAGE_PUD_SIZE) {
- VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+ lockdep_assert_held(&tlb->mm->mmap_sem);
split_huge_pud(vma, pud, addr);
} else if (zap_huge_pud(tlb, vma, pud, addr))
goto next;

Use lockdep_assert_held when asserting that mmap_sem is held. Using
this instead of rwsem_is_locked makes the assertions more tolerant of
future changes to the lock type.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 fs/userfaultfd.c | 6 +++---
 mm/gup.c         | 2 +-
 mm/memory.c      | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
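
Note (not part of the patch): a minimal sketch of what the new
assertion buys us, using a hypothetical caller name of my own.
lockdep_assert_held() verifies that the *current task* holds the lock
(rwsem_is_locked() only says that somebody does) and compiles away
when CONFIG_LOCKDEP=n:

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>

/* Hypothetical caller, for illustration only. */
static void example_with_mmap_sem(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);

	/*
	 * Passes: lockdep recorded that this task acquired mmap_sem.
	 * rwsem_is_locked() would only tell us that someone holds it.
	 */
	lockdep_assert_held(&mm->mmap_sem);

	up_read(&mm->mmap_sem);

	/*
	 * Here the assertion would WARN with CONFIG_LOCKDEP=y; with
	 * CONFIG_LOCKDEP=n it expands to nothing, so there is no
	 * runtime cost in production builds.
	 */
}

Because lockdep_assert_held() keys off the lock's lockdep map rather
than the rwsem implementation, these call sites would not need to
change if mmap_sem were later converted to a different lock type.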