@@ -6,7 +6,7 @@
#include <asm/spinlock.h>
/* MCS spin-locking. */
-#define arch_mcs_spin_lock_contended(lock) \
+#define arch_mcs_spin_lock(lock) \
do { \
/* Ensure prior stores are observed before we enter wfe. */ \
smp_mb(); \
@@ -14,9 +14,9 @@ do { \
wfe(); \
} while (0) \
-#define arch_mcs_spin_unlock_contended(lock) \
+#define arch_mcs_pass_lock(lock, val) \
do { \
- smp_store_release(lock, 1); \
+ smp_store_release((lock), (val)); \
dsb_sev(); \
} while (0)
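
With the rename, the arm contract is unchanged: arch_mcs_spin_lock() parks the waiter in WFE until the lock word becomes nonzero (with acquire semantics), and arch_mcs_pass_lock() publishes the caller-supplied value with release semantics and then issues dsb_sev() to wake any parked waiter. For illustration only, an architecture that wanted to override just the hand-off side while keeping the generic spin side could provide something like the sketch below; the wake-up helper is a placeholder name, not part of this patch.

/* Hypothetical arch override sketch -- not introduced by this patch. */
#define arch_mcs_pass_lock(l, val)					\
do {									\
	/* Order the critical section before publishing the hand-off value. */	\
	smp_store_release((l), (val));					\
	/* Placeholder: arch-specific wake-up of waiters parked in a low-power wait. */	\
	arch_wake_waiters();						\
} while (0)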
@@ -4,8 +4,8 @@
/*
* Architectures can define their own:
*
- * arch_mcs_spin_lock_contended(l)
- * arch_mcs_spin_unlock_contended(l)
+ * arch_mcs_spin_lock(l)
+ * arch_mcs_pass_lock(l, val)
*
* See kernel/locking/mcs_spinlock.c.
*/
@@ -21,7 +21,7 @@ struct mcs_spinlock {
int count; /* nesting count, see qspinlock.c */
};
-#ifndef arch_mcs_spin_lock_contended
+#ifndef arch_mcs_spin_lock
/*
* Using smp_cond_load_acquire() provides the acquire semantics
* required so that subsequent operations happen after the
@@ -29,20 +29,20 @@ struct mcs_spinlock {
* ARM64 would like to do spin-waiting instead of purely
* spinning, and smp_cond_load_acquire() provides that behavior.
*/
-#define arch_mcs_spin_lock_contended(l) \
-do { \
- smp_cond_load_acquire(l, VAL); \
+#define arch_mcs_spin_lock(l) \
+do { \
+ smp_cond_load_acquire(l, VAL); \
} while (0)
#endif
-#ifndef arch_mcs_spin_unlock_contended
+#ifndef arch_mcs_pass_lock
/*
* smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
* unlocking.
*/
-#define arch_mcs_spin_unlock_contended(l) \
- smp_store_release((l), 1)
+#define arch_mcs_pass_lock(l, val) \
+ smp_store_release((l), (val))
#endif
/*
@@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
WRITE_ONCE(prev->next, node);
/* Wait until the lock holder passes the lock down. */
- arch_mcs_spin_lock_contended(&node->locked);
+ arch_mcs_spin_lock(&node->locked);
}
/*
@@ -115,7 +115,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
}
/* Pass lock to next waiter. */
- arch_mcs_spin_unlock_contended(&next->locked);
+ arch_mcs_pass_lock(&next->locked, 1);
}
#endif /* __LINUX_MCS_SPINLOCK_H */
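
Taken together, the generic definitions above implement the classic MCS hand-off: each waiter spins on its own node's ->locked word, and the current owner passes the lock by storing a nonzero value into its successor's node. The stand-alone user-space sketch below models that hand-off with C11 atomics standing in for smp_cond_load_acquire(), smp_store_release() and WRITE_ONCE(); the type and function names are illustrative, and this is not kernel code.

/* Minimal user-space MCS model -- illustrative only. */
#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_int locked;	/* becomes nonzero once the lock is passed to us */
};

static void mcs_lock(struct mcs_node *_Atomic *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

	/* Swap ourselves in as the new tail; an empty queue means we own the lock. */
	prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
	if (!prev)
		return;

	/* Publish our node to the previous tail (WRITE_ONCE() in the kernel). */
	atomic_store_explicit(&prev->next, node, memory_order_release);

	/* arch_mcs_spin_lock(&node->locked): spin with acquire semantics. */
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;
}

static void mcs_unlock(struct mcs_node *_Atomic *lock, struct mcs_node *node)
{
	struct mcs_node *next =
		atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No known successor: try to reset the tail and release the lock. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong_explicit(lock, &expected, NULL,
							    memory_order_release,
							    memory_order_relaxed))
			return;

		/* A successor is mid-enqueue; wait for it to link itself in. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;
	}

	/* arch_mcs_pass_lock(&next->locked, 1): release store hands the lock on. */
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}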
@@ -470,7 +470,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
WRITE_ONCE(prev->next, node);
pv_wait_node(node, prev);
- arch_mcs_spin_lock_contended(&node->locked);
+ arch_mcs_spin_lock(&node->locked);
/*
* While waiting for the MCS lock, the next pointer may have
@@ -549,7 +549,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
if (!next)
next = smp_cond_load_relaxed(&node->next, (VAL));
- arch_mcs_spin_unlock_contended(&next->locked);
+ arch_mcs_pass_lock(&next->locked, 1);
pv_kick_node(lock, next);
release:
@@ -368,7 +368,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
*
* Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
*
- * The write to next->locked in arch_mcs_spin_unlock_contended()
+ * The write to next->locked in arch_mcs_pass_lock()
* must be ordered before the read of pn->state in the cmpxchg()
* below for the code to work correctly. To guarantee full ordering
* irrespective of the success or failure of the cmpxchg(),
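
The new second argument makes the passed value part of the interface rather than a hard-coded 1. Every call site converted above keeps the old behaviour by passing 1 explicitly, but the waiter only requires the value to be nonzero (both the generic smp_cond_load_acquire() loop and the arm WFE loop exit on any nonzero ->locked), so a caller could in principle hand extra information to its successor through the lock word. A hypothetical sketch of such a caller follows; it is not a call site introduced by this patch, and the flag name is invented.

/* Hypothetical -- not part of this patch. Low bit still means "locked". */
#define MY_HANDOFF_FLAG	2

	/* Owner side: pass the lock together with an extra flag bit. */
	arch_mcs_pass_lock(&next->locked, 1 | MY_HANDOFF_FLAG);

	/* Waiter side: returns once ->locked is nonzero; the value handed
	 * over can then be inspected by the new owner. */
	arch_mcs_spin_lock(&node->locked);
	if (READ_ONCE(node->locked) & MY_HANDOFF_FLAG) {
		/* ... react to the extra information ... */
	}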