===================================================================
@@ -23,7 +23,7 @@
static inline bool btrfs_try_spin_lock(struct extent_buffer *eb)
{
- return mutex_trylock(&eb->lock);
+ return mutex_tryspin(&eb->lock);
}
static inline void btrfs_tree_lock(struct extent_buffer *eb)
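
With this change btrfs_try_spin_lock() may briefly spin on a running lock holder instead of failing immediately. A minimal sketch of the caller-side pattern this is meant to serve, assuming btrfs_tree_lock() is the blocking variant visible in the context above (the helper name below is hypothetical, not part of btrfs):

	/* Hypothetical caller: opportunistic lock, fall back to sleeping. */
	static void example_lock_extent_buffer(struct extent_buffer *eb)
	{
		if (btrfs_try_spin_lock(eb))	/* may spin briefly, never sleeps */
			return;
		btrfs_tree_lock(eb);		/* blocking slow path */
	}
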
===================================================================
@@ -157,6 +157,7 @@ extern int __must_check mutex_lock_killa
* Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
extern int mutex_trylock(struct mutex *lock);
+extern int mutex_tryspin(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
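
The new declaration lands without a kernel-doc comment next to mutex_trylock()'s. A sketch of what one could say, inferred from the implementation below rather than taken from the patch:

	/**
	 * mutex_tryspin - try to acquire the mutex, spinning on a running owner
	 * @lock: the mutex to be acquired
	 *
	 * Like mutex_trylock(), but on contention the caller may briefly spin
	 * while the lock owner is running on another CPU.  It never sleeps.
	 *
	 * Returns 1 if the mutex has been acquired successfully, and 0 on
	 * contention.
	 */
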
===================================================================
@@ -126,20 +126,8 @@ void __sched mutex_unlock(struct mutex *
EXPORT_SYMBOL(mutex_unlock);
-/*
- * Lock a mutex (possibly interruptible), slowpath:
- */
-static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
- unsigned long ip)
+static inline bool mutex_spin(struct mutex *lock)
{
- struct task_struct *task = current;
- struct mutex_waiter waiter;
- unsigned long flags;
-
- preempt_disable();
- mutex_acquire(&lock->dep_map, subclass, 0, ip);
-
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* Optimistic spinning.
@@ -158,7 +146,6 @@ __mutex_lock_common(struct mutex *lock,
* We can't do this for DEBUG_MUTEXES because that relies on wait_lock
* to serialize everything.
*/
-
for (;;) {
struct thread_info *owner;
@@ -181,7 +168,7 @@ __mutex_lock_common(struct mutex *lock,
lock_acquired(&lock->dep_map, ip);
mutex_set_owner(lock);
preempt_enable();
- return 0;
+ return true;
}
/*
@@ -190,7 +177,7 @@ __mutex_lock_common(struct mutex *lock,
* we're an RT task that will live-lock because we won't let
* the owner complete.
*/
- if (!owner && (need_resched() || rt_task(task)))
+ if (!owner && (need_resched() || rt_task(current)))
break;
/*
@@ -202,6 +189,26 @@ __mutex_lock_common(struct mutex *lock,
cpu_relax();
}
#endif
+ return false;
+}
+
+/*
+ * Lock a mutex (possibly interruptible), slowpath:
+ */
+static inline int __sched
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ unsigned long ip)
+{
+ struct task_struct *task = current;
+ struct mutex_waiter waiter;
+ unsigned long flags;
+
+ preempt_disable();
+ mutex_acquire(&lock->dep_map, subclass, 0, ip);
+
+ if (mutex_spin(lock))
+ return 0;
+
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_lock_common(lock, &waiter);
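
Factored out like this, mutex_spin() keeps the calling convention of the code it replaces: it is entered with preemption disabled, returns true only after taking the lock (owner recorded, preemption re-enabled), and returns false with preemption still disabled when spinning stops looking worthwhile. A condensed sketch of the loop, with the lockdep/lockstat bookkeeping left out and assuming the elided context between the hunks is the stock mutex_spin_on_owner()-based spin (the _sketch name is made up):

	static inline bool mutex_spin_sketch(struct mutex *lock)
	{
	#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
		/* caller has already done preempt_disable() */
		for (;;) {
			struct thread_info *owner;

			/* if there is an owner, spin only while it stays on a CPU */
			owner = ACCESS_ONCE(lock->owner);
			if (owner && !mutex_spin_on_owner(lock, owner))
				break;

			/* lock looks free: the 1 -> 0 transition makes it ours */
			if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
				mutex_set_owner(lock);
				preempt_enable();
				return true;
			}

			/* no owner to watch, or we ought to reschedule: give up */
			if (!owner && (need_resched() || rt_task(current)))
				break;

			cpu_relax();
		}
	#endif
		return false;	/* preemption still disabled; caller takes the slowpath */
	}
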
@@ -473,6 +480,32 @@ int __sched mutex_trylock(struct mutex *
 }
 EXPORT_SYMBOL(mutex_trylock);
+static inline int __mutex_tryspin_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	if (__mutex_trylock_slowpath(lock_count))
+		return 1;
+	/* mutex_spin() re-enables preemption itself when it wins the lock */
+	preempt_disable();
+	if (mutex_spin(lock))
+		return 1;
+	preempt_enable();
+	return 0;
+}
+
+int __sched mutex_tryspin(struct mutex *lock)
+{
+ int ret;
+
+ ret = __mutex_fastpath_trylock(&lock->count, __mutex_tryspin_slowpath);
+ if (ret)
+ mutex_set_owner(lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mutex_tryspin);
+
/**
* atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
* @cnt: the atomic which we are to dec
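
Taken together, mutex_tryspin() has three chances to get the lock, none of which sleep. A rough sketch of the order they are tried in, assuming the generic atomic-dec fastpath and the slowpath above (the example_ function is made up, not part of the patch):

	/* Illustrative flow of mutex_tryspin(lock); not the generated code. */
	static int example_tryspin_flow(struct mutex *lock)
	{
		/* 1. fastpath: uncontended 1 -> 0 cmpxchg on lock->count */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return 1;
		/* 2. trylock slowpath: pick the lock up under wait_lock */
		if (__mutex_trylock_slowpath(&lock->count))
			return 1;
		/* 3. optimistic spin while the owner keeps running */
		preempt_disable();
		if (mutex_spin(lock))		/* re-enables preemption on success */
			return 1;
		preempt_enable();
		return 0;			/* still contended */
	}

If all three fail, the caller gets 0 back, just as with mutex_trylock(), and can fall back to a blocking lock the way the btrfs hunk above intends.
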