diff mbox series

[v2,1/4] locking/mutex: implement mutex_trylock_nested

Message ID 20250409014136.2816971-2-mlevitsk@redhat.com (mailing list archive)
State New
Headers show
Series KVM: extract lock_all_vcpus/unlock_all_vcpus | expand

Commit Message

Maxim Levitsky April 9, 2025, 1:41 a.m. UTC
Allow specifying the lockdep subclass in mutex_trylock()
instead of hardcoding it to 0.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 include/linux/mutex.h  |  8 ++++++++
 kernel/locking/mutex.c | 14 +++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)

Comments

Peter Zijlstra April 10, 2025, 8:04 a.m. UTC | #1
On Tue, Apr 08, 2025 at 09:41:33PM -0400, Maxim Levitsky wrote:
> Allow to specify the lockdep subclass in mutex_trylock
> instead of hardcoding it to 0.

We disable a whole bunch of checks for trylock, simply because trylock
operations do not wait and therefore cannot deadlock.

But I can't remember if they disable all the cases required to make
subclasses completely redundant -- memory suggests they do, but I've not
verified.

Please expand this Changelog to include definite proof that subclasses
make sense with trylock.
diff mbox series

Patch

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2143d05116be..ea568d6c4c68 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -193,7 +193,15 @@  extern void mutex_lock_io(struct mutex *lock);
  *
  * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int mutex_trylock_nested(struct mutex *lock, unsigned int subclass);
+#define mutex_trylock(lock) mutex_trylock_nested(lock, 0)
+#else
 extern int mutex_trylock(struct mutex *lock);
+#define mutex_trylock_nested(lock, subclass) mutex_trylock(lock)
+#endif
+
 extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 555e2b3a665a..5e3078865f2b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1062,6 +1062,7 @@  __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 
 #endif
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
@@ -1077,18 +1078,25 @@  __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  * mutex must be released by the same task that acquired it.
  */
 int __sched mutex_trylock(struct mutex *lock)
+{
+	MUTEX_WARN_ON(lock->magic != lock);
+	return __mutex_trylock(lock);
+}
+EXPORT_SYMBOL(mutex_trylock);
+#else
+int __sched mutex_trylock_nested(struct mutex *lock, unsigned int subclass)
 {
 	bool locked;
 
 	MUTEX_WARN_ON(lock->magic != lock);
-
 	locked = __mutex_trylock(lock);
 	if (locked)
-		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		mutex_acquire(&lock->dep_map, subclass, 1, _RET_IP_);
 
 	return locked;
 }
-EXPORT_SYMBOL(mutex_trylock);
+EXPORT_SYMBOL(mutex_trylock_nested);
+#endif
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 int __sched