diff --git a/aio-posix.c b/aio-posix.c
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -334,7 +334,9 @@ bool aio_dispatch(AioContext *ctx)
         if (!node->deleted &&
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             node->io_read) {
+            aio_context_acquire(ctx);
             node->io_read(node->opaque);
+            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->opaque != &ctx->notifier) {
@@ -344,7 +346,9 @@ bool aio_dispatch(AioContext *ctx)
         if (!node->deleted &&
             (revents & (G_IO_OUT | G_IO_ERR)) &&
             node->io_write) {
+            aio_context_acquire(ctx);
             node->io_write(node->opaque);
+            aio_context_release(ctx);
             progress = true;
         }
     }
@@ -360,7 +364,9 @@ bool aio_dispatch(AioContext *ctx)
     qemu_lockcnt_dec(&ctx->list_lock);
 
     /* Run our timers */
+    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
+    aio_context_release(ctx);
 
     return progress;
 }
@@ -418,7 +424,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     bool progress;
     int64_t timeout;
 
-    aio_context_acquire(ctx);
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -447,9 +452,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     timeout = blocking ? aio_compute_timeout(ctx) : 0;
 
     /* wait until next event */
-    if (timeout) {
-        aio_context_release(ctx);
-    }
     if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
         AioHandler epoll_handler;
 
@@ -464,9 +466,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
if (blocking) {
atomic_sub(&ctx->notify_me, 2);
}
- if (timeout) {
- aio_context_acquire(ctx);
- }
aio_notify_accept(ctx);
@@ -485,8 +484,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
         progress = true;
     }
 
-    aio_context_release(ctx);
-
     return progress;
 }
 
diff --git a/aio-win32.c b/aio-win32.c
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -245,7 +245,9 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
         if (!node->deleted &&
             (revents || event_notifier_get_handle(node->e) == event) &&
             node->io_notify) {
+            aio_context_acquire(ctx);
             node->io_notify(node->e);
+            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->e != &ctx->notifier) {
@@ -256,11 +258,15 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
         if (!node->deleted &&
             (node->io_read || node->io_write)) {
             if ((revents & G_IO_IN) && node->io_read) {
+                aio_context_acquire(ctx);
                 node->io_read(node->opaque);
+                aio_context_release(ctx);
                 progress = true;
             }
             if ((revents & G_IO_OUT) && node->io_write) {
+                aio_context_acquire(ctx);
                 node->io_write(node->opaque);
+                aio_context_release(ctx);
                 progress = true;
             }
 
@@ -305,7 +311,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
-    aio_context_acquire(ctx);
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -347,17 +352,11 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
 
         timeout = blocking && !have_select_revents
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
-        if (timeout) {
-            aio_context_release(ctx);
-        }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
             atomic_sub(&ctx->notify_me, 2);
         }
-        if (timeout) {
-            aio_context_acquire(ctx);
-        }
 
         if (first) {
             aio_notify_accept(ctx);
@@ -380,8 +379,8 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
         progress |= aio_dispatch_handlers(ctx, event);
     } while (count > 0);
 
+    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
-
     aio_context_release(ctx);
     return progress;
 }
diff --git a/async.c b/async.c
--- a/async.c
+++ b/async.c
@@ -88,7 +88,9 @@ int aio_bh_poll(AioContext *ctx)
                 ret = 1;
             }
             bh->idle = 0;
+            aio_context_acquire(ctx);
             aio_bh_call(bh);
+            aio_context_release(ctx);
         }
     }
 
The AioContext data structures are now protected by list_lock and/or
they are walked with FOREACH_RCU primitives.  There is no longer any
need to acquire the AioContext for the entire duration of
aio_dispatch.  Instead, just acquire it before and after invoking the
callbacks.  The next step is then to push it further down.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 aio-posix.c | 15 ++++++---------
 aio-win32.c | 15 +++++++--------
 async.c     |  2 ++
 3 files changed, 15 insertions(+), 17 deletions(-)
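
To illustrate the pattern the commit message describes, here is a
minimal, self-contained sketch of narrow, per-callback locking.  It is
not QEMU code: Ctx, Handler, ctx_acquire() and ctx_release() are
hypothetical stand-ins built on a plain pthread mutex, and the
list-walk safety that list_lock/FOREACH_RCU provide in QEMU is simply
assumed.

/*
 * Sketch only: the dispatch loop walks the handler list without the
 * context lock and takes the lock only around each callback.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct Handler {
    void (*io_read)(void *opaque);  /* callback to run under the lock */
    void *opaque;
    struct Handler *next;
} Handler;

typedef struct Ctx {
    pthread_mutex_t lock;           /* stands in for the AioContext lock */
    Handler *handlers;              /* walk assumed safe (list_lock/RCU) */
} Ctx;

static void ctx_acquire(Ctx *ctx) { pthread_mutex_lock(&ctx->lock); }
static void ctx_release(Ctx *ctx) { pthread_mutex_unlock(&ctx->lock); }

static bool dispatch(Ctx *ctx)
{
    bool progress = false;

    for (Handler *h = ctx->handlers; h; h = h->next) {
        if (h->io_read) {
            ctx_acquire(ctx);       /* lock held only across the callback */
            h->io_read(h->opaque);
            ctx_release(ctx);
            progress = true;
        }
    }
    return progress;
}

static void hello(void *opaque)
{
    printf("%s\n", (const char *)opaque);
}

int main(void)
{
    Handler h = { hello, (void *)"callback ran under the context lock", NULL };
    Ctx ctx = { PTHREAD_MUTEX_INITIALIZER, &h };

    return !dispatch(&ctx);
}

The point of the narrowed critical section is that a blocking poll or a
long list walk never holds the context lock, so other threads can
acquire it between callbacks.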