block: Fix hangs in synchronous APIs with iothreads

Message ID: 20190107120248.19491-1-kwolf@redhat.com
State: New, archived

Commit Message

Kevin Wolf Jan. 7, 2019, 12:02 p.m. UTC
In the block layer, synchronous APIs are often implemented by creating a
coroutine that calls the asynchronous coroutine-based implementation and
then waiting for completion with BDRV_POLL_WHILE().

For this to work with iothreads (more specifically, when the synchronous
API is called in a thread that is not the home thread of the block
device, so that the coroutine will run in a different thread), we must
make sure to call aio_wait_kick() at the end of the operation. Many
places are missing this, so that BDRV_POLL_WHILE() keeps hanging even if
the condition has long become false.
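
The required shape of such a coroutine entry stub is roughly the following. This is a minimal sketch with hypothetical names (FooCo, bdrv_foo_co_entry(), bdrv_co_foo()); the real instances are the *_entry functions touched by the hunks below:

    typedef struct FooCo {
        BlockDriverState *bs;
        int ret;
    } FooCo;

    static void coroutine_fn bdrv_foo_co_entry(void *opaque)
    {
        FooCo *fco = opaque;

        /* Run the asynchronous implementation; this may happen in the
         * node's home iothread rather than in the thread that called the
         * synchronous API. */
        fco->ret = bdrv_co_foo(fco->bs);

        /* Kick only after the return value is stored, so that the waiting
         * BDRV_POLL_WHILE() re-evaluates its condition and sees it become
         * false. */
        aio_wait_kick();
    }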

Note that bdrv_dec_in_flight() involves an aio_wait_kick() call. This
corresponds to the BDRV_POLL_WHILE() in the drain functions, but it is
generally not enough for most other operations because they haven't set
the return value in the coroutine entry stub yet. To avoid race
conditions there, we need to kick after setting the return value.

The race window is small enough that the problem doesn't usually surface
in the common path. However, it does surface and causes easily
reproducible hangs if the operation can return early before even calling
bdrv_inc/dec_in_flight, which many of them do (trivial error or no-op
success paths).

The bug in bdrv_truncate(), bdrv_check() and bdrv_invalidate_cache() is
slightly different: these functions neglected to schedule the coroutine
in the home thread of the node in the first place. Running the coroutine
in the calling thread happens to avoid the hang, but is obviously wrong,
too. Fix them to schedule the coroutine in the right AioContext in
addition to adding the aio_wait_kick() calls.
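
Continuing the hypothetical bdrv_foo() sketch from above, the synchronous wrapper then has to enter the coroutine in the node's AioContext and poll for completion, roughly like this (illustrative only, not a copy of any function in the patch):

    int bdrv_foo(BlockDriverState *bs)
    {
        Coroutine *co;
        FooCo fco = { .bs = bs, .ret = -EINPROGRESS };

        if (qemu_in_coroutine()) {
            /* Fast path: already in a coroutine, call the stub directly. */
            bdrv_foo_co_entry(&fco);
        } else {
            co = qemu_coroutine_create(bdrv_foo_co_entry, &fco);
            /* Schedule the coroutine in bs's home AioContext instead of a
             * plain qemu_coroutine_enter(co), which would run it in the
             * calling (main) thread. */
            bdrv_coroutine_enter(bs, co);
            BDRV_POLL_WHILE(bs, fco.ret == -EINPROGRESS);
        }
        return fco.ret;
    }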

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block.c                     |   6 +-
 block/block-backend.c       |   5 +
 block/io.c                  |   8 +-
 block/nbd-client.c          |   1 +
 block/nvme.c                |   1 +
 block/qcow2.c               |   1 +
 block/qed.c                 |   1 +
 tests/test-block-iothread.c | 372 ++++++++++++++++++++++++++++++++++++
 tests/Makefile.include      |   2 +
 9 files changed, 394 insertions(+), 3 deletions(-)
 create mode 100644 tests/test-block-iothread.c

Comments

Stefan Hajnoczi Jan. 8, 2019, 4:27 p.m. UTC | #1
On Mon, Jan 07, 2019 at 01:02:48PM +0100, Kevin Wolf wrote:
> [...]

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

Patch

diff --git a/block.c b/block.c
index 4f5ff2cc12..92a3b45a78 100644
--- a/block.c
+++ b/block.c
@@ -3725,6 +3725,7 @@  static void bdrv_check_co_entry(void *opaque)
 {
     CheckCo *cco = opaque;
     cco->ret = bdrv_co_check(cco->bs, cco->res, cco->fix);
+    aio_wait_kick();
 }
 
 int bdrv_check(BlockDriverState *bs,
@@ -3743,7 +3744,7 @@  int bdrv_check(BlockDriverState *bs,
         bdrv_check_co_entry(&cco);
     } else {
         co = qemu_coroutine_create(bdrv_check_co_entry, &cco);
-        qemu_coroutine_enter(co);
+        bdrv_coroutine_enter(bs, co);
         BDRV_POLL_WHILE(bs, cco.ret == -EINPROGRESS);
     }
 
@@ -4560,6 +4561,7 @@  static void coroutine_fn bdrv_invalidate_cache_co_entry(void *opaque)
     InvalidateCacheCo *ico = opaque;
     bdrv_co_invalidate_cache(ico->bs, ico->errp);
     ico->done = true;
+    aio_wait_kick();
 }
 
 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
@@ -4576,7 +4578,7 @@  void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
         bdrv_invalidate_cache_co_entry(&ico);
     } else {
         co = qemu_coroutine_create(bdrv_invalidate_cache_co_entry, &ico);
-        qemu_coroutine_enter(co);
+        bdrv_coroutine_enter(bs, co);
         BDRV_POLL_WHILE(bs, !ico.done);
     }
 }
diff --git a/block/block-backend.c b/block/block-backend.c
index 60d37a0c3d..3a3d888633 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1220,6 +1220,7 @@  static void blk_read_entry(void *opaque)
 
     rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
+    aio_wait_kick();
 }
 
 static void blk_write_entry(void *opaque)
@@ -1229,6 +1230,7 @@  static void blk_write_entry(void *opaque)
 
     rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
                                qiov, rwco->flags);
+    aio_wait_kick();
 }
 
 static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
@@ -1540,6 +1542,7 @@  static void blk_ioctl_entry(void *opaque)
 
     rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                              qiov->iov[0].iov_base);
+    aio_wait_kick();
 }
 
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
@@ -1586,6 +1589,7 @@  static void blk_flush_entry(void *opaque)
 {
     BlkRwCo *rwco = opaque;
     rwco->ret = blk_co_flush(rwco->blk);
+    aio_wait_kick();
 }
 
 int blk_flush(BlockBackend *blk)
@@ -2018,6 +2022,7 @@  static void blk_pdiscard_entry(void *opaque)
     QEMUIOVector *qiov = rwco->iobuf;
 
     rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
+    aio_wait_kick();
 }
 
 int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
diff --git a/block/io.c b/block/io.c
index bd9d688f8b..213ca03d8d 100644
--- a/block/io.c
+++ b/block/io.c
@@ -806,6 +806,7 @@  static void coroutine_fn bdrv_rw_co_entry(void *opaque)
                                     rwco->qiov->size, rwco->qiov,
                                     rwco->flags);
     }
+    aio_wait_kick();
 }
 
 /*
@@ -2279,6 +2280,7 @@  static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
                                            data->offset, data->bytes,
                                            data->pnum, data->map, data->file);
     data->done = true;
+    aio_wait_kick();
 }
 
 /*
@@ -2438,6 +2440,7 @@  static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
 {
     BdrvVmstateCo *co = opaque;
     co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
+    aio_wait_kick();
 }
 
 static inline int
@@ -2559,6 +2562,7 @@  static void coroutine_fn bdrv_flush_co_entry(void *opaque)
     FlushCo *rwco = opaque;
 
     rwco->ret = bdrv_co_flush(rwco->bs);
+    aio_wait_kick();
 }
 
 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
@@ -2704,6 +2708,7 @@  static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
     DiscardCo *rwco = opaque;
 
     rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
+    aio_wait_kick();
 }
 
 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes)
@@ -3217,6 +3222,7 @@  static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
     TruncateCo *tco = opaque;
     tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
                                 tco->errp);
+    aio_wait_kick();
 }
 
 int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
@@ -3236,7 +3242,7 @@  int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
         bdrv_truncate_co_entry(&tco);
     } else {
         co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
-        qemu_coroutine_enter(co);
+        bdrv_coroutine_enter(child->bs, co);
         BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
     }
 
diff --git a/block/nbd-client.c b/block/nbd-client.c
index fc5b7eda8e..a6b5605c0b 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -116,6 +116,7 @@  static coroutine_fn void nbd_read_reply_entry(void *opaque)
     s->quit = true;
     nbd_recv_coroutines_wake_all(s);
     s->read_reply_co = NULL;
+    aio_wait_kick();
 }
 
 static int nbd_co_send_request(BlockDriverState *bs,
diff --git a/block/nvme.c b/block/nvme.c
index 29294038fc..36779fdd3d 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -390,6 +390,7 @@  static void nvme_cmd_sync_cb(void *opaque, int ret)
 {
     int *pret = opaque;
     *pret = ret;
+    aio_wait_kick();
 }
 
 static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
diff --git a/block/qcow2.c b/block/qcow2.c
index 4897abae5e..8c91b92865 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1671,6 +1671,7 @@  static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
         /* From bdrv_co_create.  */
         qcow2_open_entry(&qoc);
     } else {
+        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
         qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
         BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
     }
diff --git a/block/qed.c b/block/qed.c
index 9377c0b7ad..1280870024 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -559,6 +559,7 @@  static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
     if (qemu_in_coroutine()) {
         bdrv_qed_open_entry(&qoc);
     } else {
+        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
         qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
         BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
     }
diff --git a/tests/test-block-iothread.c b/tests/test-block-iothread.c
new file mode 100644
index 0000000000..97ac0b159d
--- /dev/null
+++ b/tests/test-block-iothread.c
@@ -0,0 +1,372 @@ 
+/*
+ * Block tests for iothreads
+ *
+ * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "block/block.h"
+#include "block/blockjob_int.h"
+#include "sysemu/block-backend.h"
+#include "qapi/error.h"
+#include "iothread.h"
+
+static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
+                                          uint64_t offset, uint64_t bytes,
+                                          QEMUIOVector *qiov, int flags)
+{
+    return 0;
+}
+
+static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
+                                              int64_t offset, int bytes)
+{
+    return 0;
+}
+
+static int coroutine_fn
+bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset,
+                      PreallocMode prealloc, Error **errp)
+{
+    return 0;
+}
+
+static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
+                                                  bool want_zero,
+                                                  int64_t offset, int64_t count,
+                                                  int64_t *pnum, int64_t *map,
+                                                  BlockDriverState **file)
+{
+    *pnum = count;
+    return 0;
+}
+
+static BlockDriver bdrv_test = {
+    .format_name            = "test",
+    .instance_size          = 1,
+
+    .bdrv_co_preadv         = bdrv_test_co_prwv,
+    .bdrv_co_pwritev        = bdrv_test_co_prwv,
+    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
+    .bdrv_co_truncate       = bdrv_test_co_truncate,
+    .bdrv_co_block_status   = bdrv_test_co_block_status,
+};
+
+static void test_sync_op_pread(BdrvChild *c)
+{
+    uint8_t buf[512];
+    int ret;
+
+    /* Success */
+    ret = bdrv_pread(c, 0, buf, sizeof(buf));
+    g_assert_cmpint(ret, ==, 512);
+
+    /* Early error: Negative offset */
+    ret = bdrv_pread(c, -2, buf, sizeof(buf));
+    g_assert_cmpint(ret, ==, -EIO);
+}
+
+static void test_sync_op_pwrite(BdrvChild *c)
+{
+    uint8_t buf[512];
+    int ret;
+
+    /* Success */
+    ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
+    g_assert_cmpint(ret, ==, 512);
+
+    /* Early error: Negative offset */
+    ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
+    g_assert_cmpint(ret, ==, -EIO);
+}
+
+static void test_sync_op_blk_pread(BlockBackend *blk)
+{
+    uint8_t buf[512];
+    int ret;
+
+    /* Success */
+    ret = blk_pread(blk, 0, buf, sizeof(buf));
+    g_assert_cmpint(ret, ==, 512);
+
+    /* Early error: Negative offset */
+    ret = blk_pread(blk, -2, buf, sizeof(buf));
+    g_assert_cmpint(ret, ==, -EIO);
+}
+
+static void test_sync_op_blk_pwrite(BlockBackend *blk)
+{
+    uint8_t buf[512];
+    int ret;
+
+    /* Success */
+    ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
+    g_assert_cmpint(ret, ==, 512);
+
+    /* Early error: Negative offset */
+    ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
+    g_assert_cmpint(ret, ==, -EIO);
+}
+
+static void test_sync_op_load_vmstate(BdrvChild *c)
+{
+    uint8_t buf[512];
+    int ret;
+
+    /* Error: Driver does not support snapshots */
+    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
+    g_assert_cmpint(ret, ==, -ENOTSUP);
+}
+
+static void test_sync_op_save_vmstate(BdrvChild *c)
+{
+    uint8_t buf[512];
+    int ret;
+
+    /* Error: Driver does not support snapshots */
+    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
+    g_assert_cmpint(ret, ==, -ENOTSUP);
+}
+
+static void test_sync_op_pdiscard(BdrvChild *c)
+{
+    int ret;
+
+    /* Normal success path */
+    c->bs->open_flags |= BDRV_O_UNMAP;
+    ret = bdrv_pdiscard(c, 0, 512);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early success: UNMAP not supported */
+    c->bs->open_flags &= ~BDRV_O_UNMAP;
+    ret = bdrv_pdiscard(c, 0, 512);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early error: Negative offset */
+    ret = bdrv_pdiscard(c, -2, 512);
+    g_assert_cmpint(ret, ==, -EIO);
+}
+
+static void test_sync_op_blk_pdiscard(BlockBackend *blk)
+{
+    int ret;
+
+    /* Early success: UNMAP not supported */
+    ret = blk_pdiscard(blk, 0, 512);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early error: Negative offset */
+    ret = blk_pdiscard(blk, -2, 512);
+    g_assert_cmpint(ret, ==, -EIO);
+}
+
+static void test_sync_op_truncate(BdrvChild *c)
+{
+    int ret;
+
+    /* Normal success path */
+    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early error: Negative offset */
+    ret = bdrv_truncate(c, -2, PREALLOC_MODE_OFF, NULL);
+    g_assert_cmpint(ret, ==, -EINVAL);
+
+    /* Error: Read-only image */
+    c->bs->read_only = true;
+    c->bs->open_flags &= ~BDRV_O_RDWR;
+
+    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
+    g_assert_cmpint(ret, ==, -EACCES);
+
+    c->bs->read_only = false;
+    c->bs->open_flags |= BDRV_O_RDWR;
+}
+
+static void test_sync_op_block_status(BdrvChild *c)
+{
+    int ret;
+    int64_t n;
+
+    /* Normal success path */
+    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early success: No driver support */
+    bdrv_test.bdrv_co_block_status = NULL;
+    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
+    g_assert_cmpint(ret, ==, 1);
+
+    /* Early success: bytes = 0 */
+    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early success: Offset > image size*/
+    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
+    g_assert_cmpint(ret, ==, 0);
+}
+
+static void test_sync_op_flush(BdrvChild *c)
+{
+    int ret;
+
+    /* Normal success path */
+    ret = bdrv_flush(c->bs);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early success: Read-only image */
+    c->bs->read_only = true;
+    c->bs->open_flags &= ~BDRV_O_RDWR;
+
+    ret = bdrv_flush(c->bs);
+    g_assert_cmpint(ret, ==, 0);
+
+    c->bs->read_only = false;
+    c->bs->open_flags |= BDRV_O_RDWR;
+}
+
+static void test_sync_op_blk_flush(BlockBackend *blk)
+{
+    BlockDriverState *bs = blk_bs(blk);
+    int ret;
+
+    /* Normal success path */
+    ret = blk_flush(blk);
+    g_assert_cmpint(ret, ==, 0);
+
+    /* Early success: Read-only image */
+    bs->read_only = true;
+    bs->open_flags &= ~BDRV_O_RDWR;
+
+    ret = blk_flush(blk);
+    g_assert_cmpint(ret, ==, 0);
+
+    bs->read_only = false;
+    bs->open_flags |= BDRV_O_RDWR;
+}
+
+static void test_sync_op_check(BdrvChild *c)
+{
+    BdrvCheckResult result;
+    int ret;
+
+    /* Error: Driver does not implement check */
+    ret = bdrv_check(c->bs, &result, 0);
+    g_assert_cmpint(ret, ==, -ENOTSUP);
+}
+
+static void test_sync_op_invalidate_cache(BdrvChild *c)
+{
+    /* Early success: Image is not inactive */
+    bdrv_invalidate_cache(c->bs, NULL);
+}
+
+
+typedef struct SyncOpTest {
+    const char *name;
+    void (*fn)(BdrvChild *c);
+    void (*blkfn)(BlockBackend *blk);
+} SyncOpTest;
+
+const SyncOpTest sync_op_tests[] = {
+    {
+        .name   = "/sync-op/pread",
+        .fn     = test_sync_op_pread,
+        .blkfn  = test_sync_op_blk_pread,
+    }, {
+        .name   = "/sync-op/pwrite",
+        .fn     = test_sync_op_pwrite,
+        .blkfn  = test_sync_op_blk_pwrite,
+    }, {
+        .name   = "/sync-op/load_vmstate",
+        .fn     = test_sync_op_load_vmstate,
+    }, {
+        .name   = "/sync-op/save_vmstate",
+        .fn     = test_sync_op_save_vmstate,
+    }, {
+        .name   = "/sync-op/pdiscard",
+        .fn     = test_sync_op_pdiscard,
+        .blkfn  = test_sync_op_blk_pdiscard,
+    }, {
+        .name   = "/sync-op/truncate",
+        .fn     = test_sync_op_truncate,
+    }, {
+        .name   = "/sync-op/block_status",
+        .fn     = test_sync_op_block_status,
+    }, {
+        .name   = "/sync-op/flush",
+        .fn     = test_sync_op_flush,
+        .blkfn  = test_sync_op_blk_flush,
+    }, {
+        .name   = "/sync-op/check",
+        .fn     = test_sync_op_check,
+    }, {
+        .name   = "/sync-op/invalidate_cache",
+        .fn     = test_sync_op_invalidate_cache,
+    },
+};
+
+/* Test synchronous operations that run in a different iothread, so we have to
+ * poll for the coroutine there to return. */
+static void test_sync_op(const void *opaque)
+{
+    const SyncOpTest *t = opaque;
+    IOThread *iothread = iothread_new();
+    AioContext *ctx = iothread_get_aio_context(iothread);
+    BlockBackend *blk;
+    BlockDriverState *bs;
+    BdrvChild *c;
+
+    blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
+    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
+    blk_insert_bs(blk, bs, &error_abort);
+    c = QLIST_FIRST(&bs->parents);
+
+    blk_set_aio_context(blk, ctx);
+    aio_context_acquire(ctx);
+    t->fn(c);
+    if (t->blkfn) {
+        t->blkfn(blk);
+    }
+    aio_context_release(ctx);
+    blk_set_aio_context(blk, qemu_get_aio_context());
+
+    bdrv_unref(bs);
+    blk_unref(blk);
+}
+
+int main(int argc, char **argv)
+{
+    int i;
+
+    bdrv_init();
+    qemu_init_main_loop(&error_abort);
+
+    g_test_init(&argc, &argv, NULL);
+
+    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
+        const SyncOpTest *t = &sync_op_tests[i];
+        g_test_add_data_func(t->name, t, test_sync_op);
+    }
+
+    return g_test_run();
+}
diff --git a/tests/Makefile.include b/tests/Makefile.include
index fb0b449c02..7a8bcc2da7 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -70,6 +70,7 @@  check-unit-y += tests/test-bdrv-drain$(EXESUF)
 check-unit-y += tests/test-blockjob$(EXESUF)
 check-unit-y += tests/test-blockjob-txn$(EXESUF)
 check-unit-y += tests/test-block-backend$(EXESUF)
+check-unit-y += tests/test-block-iothread$(EXESUF)
 check-unit-y += tests/test-image-locking$(EXESUF)
 check-unit-y += tests/test-x86-cpuid$(EXESUF)
 # all code tested by test-x86-cpuid is inside topology.h
@@ -538,6 +539,7 @@  tests/test-bdrv-drain$(EXESUF): tests/test-bdrv-drain.o $(test-block-obj-y) $(te
 tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
 tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
 tests/test-block-backend$(EXESUF): tests/test-block-backend.o $(test-block-obj-y) $(test-util-obj-y)
+tests/test-block-iothread$(EXESUF): tests/test-block-iothread.o $(test-block-obj-y) $(test-util-obj-y)
 tests/test-image-locking$(EXESUF): tests/test-image-locking.o $(test-block-obj-y) $(test-util-obj-y)
 tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(test-block-obj-y)
 tests/test-iov$(EXESUF): tests/test-iov.o $(test-util-obj-y)