block: Fix bdrv_drain in coroutine

Message ID: 1459503998-31592-1-git-send-email-famz@redhat.com (mailing list archive)
State: New, archived

Commit Message

Fam Zheng April 1, 2016, 9:46 a.m. UTC
Using nested aio_poll() in a coroutine is a bad idea. This patch
replaces the aio_poll() loop in bdrv_drain() with a BH when it is
called from a coroutine.

For example, the bdrv_drain() in mirror.c can hang when a guest-issued
request is pending on it in qemu_co_mutex_lock().

In this case the mirror coroutine has just finished a request, and the
block job is about to complete. It calls bdrv_drain(), which waits for
the other coroutine to complete. The other coroutine is a scsi-disk
request. The deadlock happens when the latter is in turn pending on the
former to yield/terminate, in qemu_co_mutex_lock(). The state flow is
as below (assuming a qcow2 image):

  mirror coroutine               scsi-disk coroutine
  -------------------------------------------------------------
  do last write

    qcow2:qemu_co_mutex_lock()
    ...
                                 scsi disk read

                                   tracked request begin

                                   qcow2:qemu_co_mutex_lock.enter

    qcow2:qemu_co_mutex_unlock()

  bdrv_drain
    while (has tracked request)
      aio_poll()

In the scsi-disk coroutine, qemu_co_mutex_lock() will never return
because the mirror coroutine is blocked in aio_poll(blocking=true).

Reported-by: Laurent Vivier <lvivier@redhat.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
---
 block/io.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
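
To make the hang concrete, here is a minimal, hypothetical sketch of the
anti-pattern described above (this is not the actual mirror.c or
bdrv_drain() code; bdrv_requests_pending() merely stands in for the real
drain condition):

static void coroutine_fn drain_inside_coroutine_broken(BlockDriverState *bs)
{
    AioContext *ctx = bdrv_get_aio_context(bs);

    /* Blocking poll from inside a coroutine: the scsi-disk coroutine
     * parked in qemu_co_mutex_lock() can only be re-entered after the
     * current coroutine yields, which never happens here, so its tracked
     * request never completes and this loop never terminates. */
    while (bdrv_requests_pending(bs)) {
        aio_poll(ctx, true);
    }
}

Deferring the drain to a bottom half and yielding, as the patch below
does, lets the event loop re-enter the queued coroutine first.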

Comments

Paolo Bonzini April 1, 2016, 9:49 a.m. UTC | #1
On 01/04/2016 11:46, Fam Zheng wrote:
> +
> +static void bdrv_co_drain_bh_cb(void *opaque)
> +{
> +    BdrvCoDrainData *data = opaque;
> +    Coroutine *co = data->co;
> +
> +    bdrv_drain(data->bs);
> +    data->done = true;
> +    qemu_coroutine_enter(co, NULL);
> +}
> +
> +static void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
> +{
> +    QEMUBH *bh;
> +    BdrvCoDrainData data;
> +
> +    assert(qemu_in_coroutine());
> +    data = (BdrvCoDrainData) {
> +        .co = qemu_coroutine_self(),
> +        .bs = bs,
> +        .done = false,
> +    };
> +    bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
> +    qemu_bh_schedule(bh);
> +
> +    do {
> +        qemu_coroutine_yield();
> +    } while (!data.done);

The loop and "done" are not necessary.  Also,

> +    qemu_bh_delete(bh);

this can be moved to bdrv_co_drain_bh_cb before bdrv_drain, so that the
bottom half doesn't slow down the event loop until bdrv_drain completes.

Paolo

> +}
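
One way the second suggestion could look in the callback (a sketch only;
it assumes a QEMUBH *bh field is added to BdrvCoDrainData so the
callback can reach and delete the bottom half itself):

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    /* Drop the BH before the potentially long-running drain, so it does
     * not linger in the AioContext's BH list while bdrv_drain() runs. */
    qemu_bh_delete(data->bh);
    bdrv_drain(data->bs);
    data->done = true;
    qemu_coroutine_enter(co, NULL);
}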
Fam Zheng April 1, 2016, 10:09 a.m. UTC | #2
On Fri, 04/01 11:49, Paolo Bonzini wrote:
> 
> 
> On 01/04/2016 11:46, Fam Zheng wrote:
> > +
> > +static void bdrv_co_drain_bh_cb(void *opaque)
> > +{
> > +    BdrvCoDrainData *data = opaque;
> > +    Coroutine *co = data->co;
> > +
> > +    bdrv_drain(data->bs);
> > +    data->done = true;
> > +    qemu_coroutine_enter(co, NULL);
> > +}
> > +
> > +static void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
> > +{
> > +    QEMUBH *bh;
> > +    BdrvCoDrainData data;
> > +
> > +    assert(qemu_in_coroutine());
> > +    data = (BdrvCoDrainData) {
> > +        .co = qemu_coroutine_self(),
> > +        .bs = bs,
> > +        .done = false,
> > +    };
> > +    bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
> > +    qemu_bh_schedule(bh);
> > +
> > +    do {
> > +        qemu_coroutine_yield();
> > +    } while (!data.done);
> 
> The loop and "done" are not necessary.  Also,

I was trying to protect against bugs similar to the one fixed in
e424aff5f30, but you're right: we can make this an assertion, and the
loop is not needed. If the calling coroutine is resumed unexpectedly, we
will catch it and fix it.

> 
> > +    qemu_bh_delete(bh);
> 
> this can be moved to bdrv_co_drain_bh_cb before bdrv_drain, so that the
> bottom half doesn't slow down the event loop until bdrv_drain completes.

Good point! Will fix.

Thanks,

Fam
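
A sketch of the caller side with the loop replaced by the assertion
discussed above (hypothetical; it creates the BH in the struct
initializer, again assuming a bh field in BdrvCoDrainData):

static void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    /* Yield exactly once; the BH callback re-enters this coroutine after
     * bdrv_drain() completes.  If the coroutine is resumed for any other
     * reason, the assertion catches the bug instead of silently looping. */
    qemu_coroutine_yield();
    assert(data.done);
}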
Laurent Vivier April 1, 2016, 12:05 p.m. UTC | #3
On 01/04/2016 11:46, Fam Zheng wrote:
> Using the nested aio_poll() in coroutine is a bad idea. This patch
> replaces the aio_poll loop in bdrv_drain with a BH, if called in
> coroutine.
> 
> For example, the bdrv_drain() in mirror.c can hang when a guest issued
> request is pending on it in qemu_co_mutex_lock().
> 
> Mirror coroutine in this case has just finished a request, and the block
> job is about to complete. It calls bdrv_drain() which waits for the
> other coroutine to complete. The other coroutine is a scsi-disk request.
> The deadlock happens when the latter is in turn pending on the former to
> yield/terminate, qemu_co_mutex_lock(). The state flow is as below
> (assuming a qcow2 image):
> 
>   mirror coroutine               scsi-disk coroutine
>   -------------------------------------------------------------
>   do last write
> 
>     qcow2:qemu_co_mutex_lock()
>     ...
>                                  scsi disk read
> 
>                                    tracked request begin
> 
>                                    qcow2:qemu_co_mutex_lock.enter
> 
>     qcow2:qemu_co_mutex_unlock()
> 
>   bdrv_drain
>     while (has tracked request)
>       aio_poll()
> 
> In the scsi-disk coroutine, the qemu_co_mutex_lock() will never return
> because the mirror coroutine is blocked in the aio_poll(blocking=true).
> 
> Reported-by: Laurent Vivier <lvivier@redhat.com>

I've checked that this fixes the problem I reported (on ppc64 and
x86_64), and it does.

Thanks Fam,
Laurent

> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Fam Zheng <famz@redhat.com>
> ---
>  block/io.c | 40 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 40 insertions(+)
> 
> diff --git a/block/io.c b/block/io.c
> index c4869b9..d0a4551 100644
> --- a/block/io.c
> +++ b/block/io.c
> @@ -253,6 +253,42 @@ static void bdrv_drain_recurse(BlockDriverState *bs)
>      }
>  }
>  
> +typedef struct {
> +    Coroutine *co;
> +    BlockDriverState *bs;
> +    bool done;
> +} BdrvCoDrainData;
> +
> +static void bdrv_co_drain_bh_cb(void *opaque)
> +{
> +    BdrvCoDrainData *data = opaque;
> +    Coroutine *co = data->co;
> +
> +    bdrv_drain(data->bs);
> +    data->done = true;
> +    qemu_coroutine_enter(co, NULL);
> +}
> +
> +static void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
> +{
> +    QEMUBH *bh;
> +    BdrvCoDrainData data;
> +
> +    assert(qemu_in_coroutine());
> +    data = (BdrvCoDrainData) {
> +        .co = qemu_coroutine_self(),
> +        .bs = bs,
> +        .done = false,
> +    };
> +    bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
> +    qemu_bh_schedule(bh);
> +
> +    do {
> +        qemu_coroutine_yield();
> +    } while (!data.done);
> +    qemu_bh_delete(bh);
> +}
> +
>  /*
>   * Wait for pending requests to complete on a single BlockDriverState subtree,
>   * and suspend block driver's internal I/O until next request arrives.
> @@ -269,6 +305,10 @@ void bdrv_drain(BlockDriverState *bs)
>      bool busy = true;
>  
>      bdrv_drain_recurse(bs);
> +    if (qemu_in_coroutine()) {
> +        bdrv_co_drain(bs);
> +        return;
> +    }
>      while (busy) {
>          /* Keep iterating */
>           bdrv_flush_io_queue(bs);
>

Patch

diff --git a/block/io.c b/block/io.c
index c4869b9..d0a4551 100644
--- a/block/io.c
+++ b/block/io.c
@@ -253,6 +253,42 @@  static void bdrv_drain_recurse(BlockDriverState *bs)
     }
 }
 
+typedef struct {
+    Coroutine *co;
+    BlockDriverState *bs;
+    bool done;
+} BdrvCoDrainData;
+
+static void bdrv_co_drain_bh_cb(void *opaque)
+{
+    BdrvCoDrainData *data = opaque;
+    Coroutine *co = data->co;
+
+    bdrv_drain(data->bs);
+    data->done = true;
+    qemu_coroutine_enter(co, NULL);
+}
+
+static void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
+{
+    QEMUBH *bh;
+    BdrvCoDrainData data;
+
+    assert(qemu_in_coroutine());
+    data = (BdrvCoDrainData) {
+        .co = qemu_coroutine_self(),
+        .bs = bs,
+        .done = false,
+    };
+    bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
+    qemu_bh_schedule(bh);
+
+    do {
+        qemu_coroutine_yield();
+    } while (!data.done);
+    qemu_bh_delete(bh);
+}
+
 /*
  * Wait for pending requests to complete on a single BlockDriverState subtree,
  * and suspend block driver's internal I/O until next request arrives.
@@ -269,6 +305,10 @@  void bdrv_drain(BlockDriverState *bs)
     bool busy = true;
 
     bdrv_drain_recurse(bs);
+    if (qemu_in_coroutine()) {
+        bdrv_co_drain(bs);
+        return;
+    }
     while (busy) {
         /* Keep iterating */
          bdrv_flush_io_queue(bs);