
[5/5] xfs: Stop using lockdep to assert that locks are held

Message ID 20230907174705.2976191-6-willy@infradead.org
State New
Series Remove the XFS mrlock

Commit Message

Matthew Wilcox Sept. 7, 2023, 5:47 p.m. UTC
Lockdep does not know that the worker thread has inherited the lock
from its caller.  Rather than dance around moving the ownership from the
caller to the thread and back again, just remove the lockdep assertions
and rely on the rwsem itself to tell us whether _somebody_ is holding
the lock at the moment.

__xfs_rwsem_islocked() simplifies into a trivial function, which is easy
to inline into xfs_isilocked().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/xfs/xfs_inode.c | 40 ++++++++--------------------------------
 1 file changed, 8 insertions(+), 32 deletions(-)
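
For illustration, a minimal sketch of the distinction this patch relies on.
The helper names below are hypothetical and not part of the patch; they assume
the context of this series, where ip->i_lock is a plain struct rw_semaphore.
Lockdep answers "does the current task hold this rwsem?", which misfires in a
worker thread that inherited the lock from its caller; the rwsem state checks
only answer "does somebody hold it?", which is all xfs_isilocked() needs.

/*
 * Per-task view: does the *current* task hold i_lock for writing?
 * This is what lockdep tracks, and it fails in a worker thread even
 * though the submitting task still holds the lock on its behalf.
 */
static inline bool
example_ilock_held_by_me(
	struct xfs_inode	*ip)
{
	return lockdep_is_held_type(&ip->i_lock, 0);
}

/*
 * Lock-state view: does _somebody_ hold i_lock for writing?  This is
 * the check xfs_isilocked() switches to in the patch below.
 */
static inline bool
example_ilock_held_by_somebody(
	struct xfs_inode	*ip)
{
	return rwsem_is_write_locked(&ip->i_lock);
}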

Patch

diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c3cd73c29868..81ee6bf8c662 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -334,29 +334,6 @@  xfs_ilock_demote(
 }
 
 #if defined(DEBUG) || defined(XFS_WARN)
-static inline bool
-__xfs_rwsem_islocked(
-	struct rw_semaphore	*rwsem,
-	bool			shared)
-{
-	if (!debug_locks) {
-		if (!shared)
-			return rwsem_is_write_locked(rwsem);
-		return rwsem_is_locked(rwsem);
-	}
-
-	if (!shared)
-		return lockdep_is_held_type(rwsem, 0);
-
-	/*
-	 * We are checking that the lock is held at least in shared
-	 * mode but don't care that it might be held exclusively
-	 * (i.e. shared | excl). Hence we check if the lock is held
-	 * in any mode rather than an explicit shared mode.
-	 */
-	return lockdep_is_held_type(rwsem, -1);
-}
-
 bool
 xfs_isilocked(
 	struct xfs_inode	*ip,
@@ -366,15 +343,14 @@  xfs_isilocked(
 		return rwsem_is_locked(&ip->i_lock);
 	if (lock_flags & XFS_ILOCK_EXCL)
 		return rwsem_is_write_locked(&ip->i_lock);
-	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
-		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
-				(lock_flags & XFS_MMAPLOCK_SHARED));
-	}
-
-	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
-		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
-				(lock_flags & XFS_IOLOCK_SHARED));
-	}
+	if (lock_flags & XFS_MMAPLOCK_SHARED)
+		return rwsem_is_locked(&VFS_I(ip)->i_mapping->invalidate_lock);
+	if (lock_flags & XFS_MMAPLOCK_EXCL)
+		return rwsem_is_write_locked(&VFS_I(ip)->i_mapping->invalidate_lock);
+	if (lock_flags & XFS_IOLOCK_SHARED)
+		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
+	if (lock_flags & XFS_IOLOCK_EXCL)
+		return rwsem_is_write_locked(&VFS_I(ip)->i_rwsem);
 
 	ASSERT(0);
 	return false;
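
For context, xfs_isilocked() is consumed from ASSERT() checks elsewhere in
XFS; an illustrative callsite (not part of this patch) looks like:

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

With the rwsem-based checks above, such an assertion is satisfied whenever any
task holds the lock in the requested mode, including a worker thread running
on behalf of the caller that took it.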