@@ -156,9 +156,21 @@ bool qemu_co_queue_empty(CoQueue *queue);
/**
* Provides a mutex that can be used to synchronise coroutines
*/
+struct CoWaitRecord;
typedef struct CoMutex {
- bool locked;
- CoQueue queue;
+ /* Count of pending lockers; 0 for a free mutex, 1 for an
+ * uncontended mutex.
+ */
+ unsigned locked;
+
+ /* A queue of waiters. Elements are added atomically in front of
+ * from_push. to_pop is only populated, and popped from, by whoever
+ * is in charge of the next wakeup. This can be an unlocker or,
+ * through the handoff protocol, a locker that is about to go to sleep.
+ */
+ QSLIST_HEAD(, CoWaitRecord) from_push, to_pop;
+
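+ /* The hand-off token: 0 when no hand-off is pending, otherwise a
+ * nonzero value drawn from sequence. See util/qemu-coroutine-lock.c
+ * for the protocol that uses it.
+ */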
+ unsigned handoff, sequence;
} CoMutex;
/**
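As an aside (not part of the patch): a minimal sketch of how the API
above is used from a coroutine. worker() and the surrounding setup are
hypothetical; only coroutine_fn and the qemu_co_mutex_* calls come from
this header.

    static CoMutex lock;
    static int shared;

    static void coroutine_fn worker(void *opaque)
    {
        qemu_co_mutex_lock(&lock);     /* may yield if the mutex is held */
        shared++;                      /* critical section */
        qemu_co_mutex_unlock(&lock);
    }

    /* once, before any worker coroutine runs: */
    qemu_co_mutex_init(&lock);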
@@ -203,6 +203,82 @@ static void test_multi_co_schedule_10(void)
test_multi_co_schedule(10);
}
+/* CoMutex thread-safety. */
+
+static uint32_t atomic_counter;
+static uint32_t counter;
+static CoMutex comutex;
+
+static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
+{
+ while (!atomic_mb_read(&now_stopping)) {
+ qemu_co_mutex_lock(&comutex);
+ counter++;
+ qemu_co_mutex_unlock(&comutex);
+
+ /* Increase atomic_counter *after* releasing the mutex. Otherwise
+ * there is a chance (it happens about 1 in 3 runs) that the iothread
+ * exits before the coroutine is woken up, causing a spurious
+ * assertion failure.
+ */
+ atomic_inc(&atomic_counter);
+ }
+}
+
+static void test_multi_co_mutex(int threads, int seconds)
+{
+ int i;
+
+ qemu_co_mutex_init(&comutex);
+ counter = 0;
+ atomic_counter = 0;
+ now_stopping = false;
+
+ create_aio_contexts();
+ assert(threads <= NUM_CONTEXTS);
+ for (i = 0; i < threads; i++) {
+ Coroutine *co1 = qemu_coroutine_create(test_multi_co_mutex_entry, NULL);
+ aio_co_schedule(ctx[i], co1);
+ }
+
+ g_usleep(seconds * 1000000);
+
+ atomic_mb_set(&now_stopping, true);
+ join_aio_contexts();
+ g_test_message("%u iterations/second\n", counter / seconds);
+ g_assert_cmpint(counter, ==, atomic_counter);
+}
+
+/* Testing with NUM_CONTEXTS threads focuses on the queue. The mutex however
+ * is too contended (and the threads spend too much time in aio_poll)
+ * to actually stress the handoff protocol.
+ */
+static void test_multi_co_mutex_1(void)
+{
+ test_multi_co_mutex(NUM_CONTEXTS, 1);
+}
+
+static void test_multi_co_mutex_10(void)
+{
+ test_multi_co_mutex(NUM_CONTEXTS, 10);
+}
+
+/* Testing with fewer threads stresses the handoff protocol too. Still, the
+ * case where the locker _can_ pick up a handoff is very rare, happening
+ * about 10 times in 1 million, so increase the runtime a bit compared to
+ * other "quick" testcases that only run for 1 second.
+ */
+static void test_multi_co_mutex_2_3(void)
+{
+ test_multi_co_mutex(2, 3);
+}
+
+static void test_multi_co_mutex_2_30(void)
+{
+ test_multi_co_mutex(2, 30);
+}
+
/* End of tests. */
int main(int argc, char **argv)
@@ -213,8 +289,12 @@ int main(int argc, char **argv)
g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
if (g_test_quick()) {
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
+ g_test_add_func("/aio/multi/mutex", test_multi_co_mutex_1);
+ g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
} else {
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
+ g_test_add_func("/aio/multi/mutex", test_multi_co_mutex_10);
+ g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
}
return g_test_run();
}
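As an aside: assuming the usual QEMU build tree, the new cases can also be
run individually through the glib test harness, e.g.:

    ./tests/test-aio-multithread -p /aio/multi/mutex
    ./tests/test-aio-multithread -p /aio/multi/mutex/handoff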
@@ -985,6 +985,7 @@ qemu_coroutine_terminate(void *co) "self %p"
qemu_coroutine_queue_next(void *from, void *nxt) "%p->%p"
qemu_co_queue_run_restart(void *co) "co %p"
qemu_co_queue_next(void *ctx, void *nxt) "context %p next %p"
+qemu_co_mutex_lock_uncontended(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p"
@@ -20,6 +20,10 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
+ *
+ * The lock-free mutex implementation is based on OSv
+ * (core/lfmutex.cc, include/lockfree/mutex.hh).
+ * Copyright (C) 2013 Cloudius Systems, Ltd.
*/
#include "qemu/osdep.h"
@@ -105,24 +109,111 @@ bool qemu_co_queue_empty(CoQueue *queue)
return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}
+/* The wait records are handled with a multiple-producer, single-consumer
+ * lock-free queue. There cannot be two concurrent pop_waiter() calls
+ * because pop_waiter() can only come when mutex->handoff is zero. This can
+ * happen in three cases:
+ * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
+ * In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
+ * not take part in the handoff.
+ * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
+ * qemu_co_mutex_unlock. In this case, qemu_co_mutex_unlock will fail
+ * the cmpxchg (it will see either 0 or the next sequence value) and
+ * exit. The next hand-off cannot begin until qemu_co_mutex_lock has
+ * woken up someone.
+ * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
+ * In this case another iteration starts with mutex->handoff == 0;
+ * a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
+ * qemu_co_mutex_unlock will go back to case (1).
+ *
+ * The following functions manage this queue.
+ */
+typedef struct CoWaitRecord {
+ Coroutine *co;
+ QSLIST_ENTRY(CoWaitRecord) next;
+} CoWaitRecord;
+
+static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
+{
+ w->co = qemu_coroutine_self();
+ QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
+}
+
+static void move_waiters(CoMutex *mutex)
+{
+ QSLIST_HEAD(, CoWaitRecord) reversed;
+ QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
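+ /* from_push is LIFO (pushes go at the head); re-inserting each
+ * element at the head of to_pop reverses it, so waiters are popped
+ * in FIFO order.
+ */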
+ while (!QSLIST_EMPTY(&reversed)) {
+ CoWaitRecord *w = QSLIST_FIRST(&reversed);
+ QSLIST_REMOVE_HEAD(&reversed, next);
+ QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
+ }
+}
+
+static CoWaitRecord *pop_waiter(CoMutex *mutex)
+{
+ CoWaitRecord *w;
+
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
+ move_waiters(mutex);
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
+ return NULL;
+ }
+ }
+ w = QSLIST_FIRST(&mutex->to_pop);
+ QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
+ return w;
+}
+
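+/* Check both queues: a record pushed by a concurrent lock() may not
+ * have been moved over to to_pop yet.
+ */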
+static bool has_waiters(CoMutex *mutex)
+{
+ return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
+}
+
void qemu_co_mutex_init(CoMutex *mutex)
{
memset(mutex, 0, sizeof(*mutex));
- qemu_co_queue_init(&mutex->queue);
}
void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
Coroutine *self = qemu_coroutine_self();
+ CoWaitRecord w;
+ unsigned old_handoff;
+
+ if (atomic_fetch_inc(&mutex->locked) == 0) {
+ /* Uncontended. */
+ trace_qemu_co_mutex_lock_uncontended(mutex, self);
+ return;
+ }
trace_qemu_co_mutex_lock_entry(mutex, self);
+ self->ctx = qemu_get_current_aio_context();
+ push_waiter(mutex, &w);
- while (mutex->locked) {
- qemu_co_queue_wait(&mutex->queue);
- }
+ /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
+ * a concurrent unlock() the responsibility of waking somebody up.
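+ *
+ * One possible interleaving (time flows downwards):
+ *
+ *   unlocker                         locker
+ *   fetch_dec(locked) == 2
+ *   pop_waiter() -> NULL             fetch_inc(locked) != 0
+ *                                    push_waiter()
+ *   handoff = sequence
+ *                                    sees handoff != 0
+ *                                    cmpxchg(handoff, seq, 0) wins
+ *   cmpxchg fails -> done            pop_waiter() -> itself, lock taken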
+ */
+ old_handoff = atomic_mb_read(&mutex->handoff);
+ if (old_handoff &&
+ has_waiters(mutex) &&
+ atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
+ /* There can be no concurrent pops, because there can be only
+ * one active handoff at a time.
+ */
+ CoWaitRecord *to_wake = pop_waiter(mutex);
+ Coroutine *co = to_wake->co;
+ if (co == self) {
+ /* We got the lock ourselves! */
+ assert(to_wake == &w);
+ return;
+ }
- mutex->locked = true;
+ qemu_coroutine_wake(co->ctx, co);
+ }
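+ /* We are still on the wait queue; whoever is in charge of the next
+ * wakeup (an unlocker, or the unlock of the waiter we just woke)
+ * will wake us.
+ */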
+ qemu_coroutine_yield();
trace_qemu_co_mutex_lock_return(mutex, self);
}
@@ -132,12 +223,50 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
trace_qemu_co_mutex_unlock_entry(mutex, self);
- assert(mutex->locked == true);
+ assert(mutex->locked);
assert(qemu_in_coroutine());
- mutex->locked = false;
- qemu_co_queue_next(&mutex->queue);
+ if (atomic_fetch_dec(&mutex->locked) == 1) {
+ /* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */
+ return;
+ }
+
+ for (;;) {
+ CoWaitRecord *to_wake = pop_waiter(mutex);
+ unsigned our_handoff;
+
+ if (to_wake) {
+ Coroutine *co = to_wake->co;
+ qemu_coroutine_wake(co->ctx, co);
+ goto out;
+ }
+
+ /* Some concurrent lock() is in progress (we know this because
+ * mutex->locked was at least 2) but it hasn't yet put itself on the
+ * wait queue.
+ * Pick a sequence number for the handoff protocol (not 0).
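+ * Zero is reserved to mean "no hand-off pending", and drawing a fresh
+ * value each round keeps a stale lock() from matching an old token
+ * (an ABA guard on the cmpxchg below).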
+ */
+ if (++mutex->sequence == 0) {
+ mutex->sequence = 1;
+ }
+
+ our_handoff = mutex->sequence;
+ atomic_mb_set(&mutex->handoff, our_handoff);
+ if (!has_waiters(mutex)) {
+ /* The concurrent lock has not added itself yet, so it
+ * will be able to pick our handoff.
+ */
+ goto out;
+ }
+
+ /* Try to do the handoff protocol ourselves; if somebody else has
+ * already taken it, however, we're done and they're responsible.
+ */
+ if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
+ goto out;
+ }
+ }
+out:
trace_qemu_co_mutex_unlock_return(mutex, self);
}
This uses the lock-free mutex described in the paper '"Blocking without
Locking", or LFTHREADS: A lock-free thread library' by Gidenstam and
Papatriantafilou.  The same technique is used in OSv, and in fact the code
is essentially a conversion to C of OSv's code.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 include/qemu/coroutine.h     |  16 ++++-
 tests/test-aio-multithread.c |  80 ++++++++++++++++++++++++
 trace-events                 |   1 +
 util/qemu-coroutine-lock.c   | 145 ++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 232 insertions(+), 10 deletions(-)