@@ -401,7 +401,7 @@ static void add_pollfd(AioHandler *node)
npfd++;
}
-bool aio_poll(AioContext *ctx, bool blocking)
+bool aio_poll_internal(AioContext *ctx, bool blocking)
{
AioHandler *node;
int i, ret;
@@ -281,7 +281,7 @@ bool aio_dispatch(AioContext *ctx)
return progress;
}
-bool aio_poll(AioContext *ctx, bool blocking)
+bool aio_poll_internal(AioContext *ctx, bool blocking)
{
AioHandler *node;
HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
@@ -300,6 +300,14 @@ void aio_notify_accept(AioContext *ctx)
}
}
+bool aio_poll(AioContext *ctx, bool blocking)
+{
+ assert(qemu_mutex_iothread_locked() ||
+ aio_context_in_iothread(ctx));
+
+ return aio_poll_internal(ctx, blocking);
+}
+
static void aio_timerlist_notify(void *opaque)
{
aio_notify(opaque);
@@ -287,6 +287,12 @@ bool aio_pending(AioContext *ctx);
*/
bool aio_dispatch(AioContext *ctx);
+/* Same as aio_poll, but only meant for use in the I/O thread.
+ *
+ * This is used internally in the implementation of aio_poll.
+ */
+bool aio_poll_internal(AioContext *ctx, bool blocking);
+
/* Progress in completing AIO work to occur. This can issue new pending
* aio as a result of executing I/O completion or bh callbacks.
*
Move the implementation of aio_poll to aio_poll_internal, and introduce
a wrapper for public use. For now the wrapper just asserts that aio_poll
is being used correctly: either from the thread that manages the context,
or with the QEMU global mutex held. The next patch, however, will
completely separate the two cases.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 aio-posix.c         | 2 +-
 aio-win32.c         | 2 +-
 async.c             | 8 ++++++++
 include/block/aio.h | 6 ++++++
 4 files changed, 16 insertions(+), 2 deletions(-)
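For reference, here is a minimal standalone sketch of the wrapper-with-assertion
pattern the patch introduces. The names below (ctx_state, in_home_thread,
global_lock_held, poll_public, poll_internal) are hypothetical stand-ins for
AioContext, aio_context_in_iothread(), qemu_mutex_iothread_locked(), aio_poll
and aio_poll_internal; they are not QEMU APIs, only an illustration of the
calling convention that the new assertion enforces.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool in_home_thread;    /* running in the context's own thread? */
    bool global_lock_held;  /* global mutex held by this thread?    */
} ctx_state;

/* Internal implementation: assumes the caller already satisfied the rules. */
static bool poll_internal(ctx_state *ctx, bool blocking)
{
    /* ... dispatch handlers, optionally block waiting for events ... */
    (void)ctx;
    (void)blocking;
    return false; /* no progress in this sketch */
}

/* Public entry point: enforce the calling convention, then delegate. */
static bool poll_public(ctx_state *ctx, bool blocking)
{
    assert(ctx->global_lock_held || ctx->in_home_thread);
    return poll_internal(ctx, blocking);
}

int main(void)
{
    ctx_state ctx = { .in_home_thread = true, .global_lock_held = false };

    /* Allowed: called from the thread that manages the context. */
    poll_public(&ctx, false);

    /* A call with both flags false would trip the assertion. */
    printf("assertion pattern ok\n");
    return 0;
}
```

The design point is that the public symbol stays the place where usage rules
are checked, while the internal function remains free of policy; later patches
can then give the two cases different behavior without touching callers.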