@@ -465,24 +465,21 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * and nobody is pending, clear the tail code and grab the lock.
 	 * Otherwise, we only need to grab the lock.
 	 */
-	for (;;) {
-		/* In the PV case we might already have _Q_LOCKED_VAL set */
-		if ((val & _Q_TAIL_MASK) != tail || (val & _Q_PENDING_MASK)) {
-			set_locked(lock);
-			break;
-		}
+
+	/* In the PV case we might already have _Q_LOCKED_VAL set */
+	if ((val & _Q_TAIL_MASK) == tail) {
 		/*
 		 * The smp_cond_load_acquire() call above has provided the
-		 * necessary acquire semantics required for locking. At most
-		 * two iterations of this loop may be ran.
+		 * necessary acquire semantics required for locking.
 		 */
 		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
 		if (old == val)
-			goto release;	/* No contention */
-
-		val = old;
+			goto release; /* No contention */
 	}
 
+	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
+	set_locked(lock);
+
 	/*
 	 * contended path; wait for next if not observed yet, release.
 	 */