
[for-8.2] export/vhost-user-blk: Fix consecutive drains

Message ID 20231124174436.46536-1-kwolf@redhat.com (mailing list archive)
State New, archived

Commit Message

Kevin Wolf Nov. 24, 2023, 5:44 p.m. UTC
The vhost-user-blk export implements AioContext switches in its drain
implementation. This means that on drain_begin, it detaches the server
from its AioContext and on drain_end, attaches it again and schedules
the server->co_trip coroutine in the updated AioContext.
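
For reference, the drain hooks are wired up through the export's
BlockDevOps (abridged from block/export/vhost-user-blk-server.c):

    static const BlockDevOps vu_blk_dev_ops = {
        .drained_begin = vu_blk_drained_begin,
        .drained_end   = vu_blk_drained_end,
        .drained_poll  = vu_blk_drained_poll,
    };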

However, nothing guarantees that server->co_trip is even safe to
schedule. Not only is it unclear whether the coroutine is actually in a
state where it can be reentered externally without causing problems,
but with two consecutive drains, it is possible that the scheduled
coroutine hasn't had a chance to run yet, and scheduling an already
scheduled coroutine a second time crashes with an assertion failure.
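
The crash comes from the scheduling guard in aio_co_schedule()
(paraphrased from util/async.c), which refuses to schedule a coroutine
that is already scheduled:

    void aio_co_schedule(AioContext *ctx, Coroutine *co)
    {
        /* Atomically mark the coroutine as scheduled; if it already
         * was, scheduling it a second time is a fatal error. */
        const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                                __func__);
        if (scheduled) {
            fprintf(stderr,
                    "%s: Co-routine was already scheduled in '%s'\n",
                    __func__, scheduled);
            abort();
        }
        /* ... queue the coroutine to run in ctx ... */
    }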

Following the model of NBD, this commit makes the vhost-user-blk export
shut down server->co_trip during drain so that resuming the export means
creating and scheduling a new coroutine, which is always safe.

There is one exception: if the drain call didn't poll (this happens,
for example, in the context of bdrv_graph_wrlock()), then the coroutine
hasn't had a chance to shut down. However, in this case the AioContext
can't have changed, because changing the AioContext always involves a
polling drain. So in this case, we can simply assert that the
AioContext is unchanged, and either leave the coroutine running or wake
it up if it has yielded to wait for the AioContext to be attached
again.
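
For context, bdrv_graph_wrlock() quiesces I/O with a non-polling drain,
roughly like this (a simplified sketch of block/graph-lock.c; the
signature and locking details are elided):

    void bdrv_graph_wrlock(void)
    {
        /* Quiesce I/O without polling: .drained_poll is never run
         * here, so vu_client_trip() may still be alive when the
         * matching drained_end fires. */
        bdrv_drain_all_begin_nopoll();

        /* ... take the writer lock, wait for readers ... */

        bdrv_drain_all_end();
    }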

Fixes: e1054cd4aad03a493a5d1cded7508f7c348205bf
Fixes: https://issues.redhat.com/browse/RHEL-1708
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/vhost-user-server.h     |  2 ++
 block/export/vhost-user-blk-server.c |  9 +++++--
 util/vhost-user-server.c             | 36 +++++++++++++++++++++++-----
 3 files changed, 39 insertions(+), 8 deletions(-)

Comments

Kevin Wolf Nov. 27, 2023, 11:55 a.m. UTC | #1
Am 24.11.2023 um 18:44 hat Kevin Wolf geschrieben:
> [...]
> diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
> index 5ccc6d24a0..23004d0c62 100644
> --- a/util/vhost-user-server.c
> +++ b/util/vhost-user-server.c
> @@ -133,7 +133,9 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
>                      server->in_qio_channel_yield = false;
>                  } else {
>                      /* Wait until attached to an AioContext again */
> +                    server->wake_on_ctx_attach = true;
>                      qemu_coroutine_yield();
> +                    assert(!server->wake_on_ctx_attach);
>                  }

Yielding here isn't good enough: drained_poll waits for the coroutine
to terminate, so if the coroutine yields here, the drain hangs. v2 will
return instead.
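
For illustration, a return-based variant could look like this in
vu_message_read() (a sketch only, not necessarily the actual v2;
vu_read_msg_cb returns bool, so false propagates as a failed read):

    } else {
        /*
         * Not attached to an AioContext: return instead of yielding
         * so that vu_client_trip() can terminate and drained_poll
         * makes progress. (Hypothetical sketch.)
         */
        return false;
    }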

Kevin

Patch

diff --git a/include/qemu/vhost-user-server.h b/include/qemu/vhost-user-server.h
index 64ad701015..ca1713b53e 100644
--- a/include/qemu/vhost-user-server.h
+++ b/include/qemu/vhost-user-server.h
@@ -45,6 +45,8 @@ typedef struct {
     /* Protected by ctx lock */
     bool in_qio_channel_yield;
     bool wait_idle;
+    bool quiescing;
+    bool wake_on_ctx_attach;
     VuDev vu_dev;
     QIOChannel *ioc; /* The I/O channel with the client */
     QIOChannelSocket *sioc; /* The underlying data channel with the client */
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index fe2cee3a78..16f48388d3 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -283,6 +283,7 @@ static void vu_blk_drained_begin(void *opaque)
 {
     VuBlkExport *vexp = opaque;
 
+    vexp->vu_server.quiescing = true;
     vhost_user_server_detach_aio_context(&vexp->vu_server);
 }
 
@@ -291,19 +292,23 @@ static void vu_blk_drained_end(void *opaque)
 {
     VuBlkExport *vexp = opaque;
 
+    vexp->vu_server.quiescing = false;
     vhost_user_server_attach_aio_context(&vexp->vu_server, vexp->export.ctx);
 }
 
 /*
- * Ensures that bdrv_drained_begin() waits until in-flight requests complete.
+ * Ensures that bdrv_drained_begin() waits until in-flight requests complete
+ * and the server->co_trip coroutine has terminated. It will be restarted in
+ * vhost_user_server_attach_aio_context().
  *
  * Called with vexp->export.ctx acquired.
  */
 static bool vu_blk_drained_poll(void *opaque)
 {
     VuBlkExport *vexp = opaque;
+    VuServer *server = &vexp->vu_server;
 
-    return vhost_user_server_has_in_flight(&vexp->vu_server);
+    return server->co_trip || vhost_user_server_has_in_flight(server);
 }
 
 static const BlockDevOps vu_blk_dev_ops = {
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index 5ccc6d24a0..23004d0c62 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -133,7 +133,9 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
                     server->in_qio_channel_yield = false;
                 } else {
                     /* Wait until attached to an AioContext again */
+                    server->wake_on_ctx_attach = true;
                     qemu_coroutine_yield();
+                    assert(!server->wake_on_ctx_attach);
                 }
                 continue;
             } else {
@@ -201,8 +203,15 @@ static coroutine_fn void vu_client_trip(void *opaque)
     VuServer *server = opaque;
     VuDev *vu_dev = &server->vu_dev;
 
-    while (!vu_dev->broken && vu_dispatch(vu_dev)) {
-        /* Keep running */
+    while (!vu_dev->broken) {
+        if (server->quiescing) {
+            server->co_trip = NULL;
+            aio_wait_kick();
+            return;
+        }
+        if (!vu_dispatch(vu_dev)) {
+            break;
+        }
     }
 
     if (vhost_user_server_has_in_flight(server)) {
@@ -353,8 +362,7 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc,
 
     qio_channel_set_follow_coroutine_ctx(server->ioc, true);
 
-    server->co_trip = qemu_coroutine_create(vu_client_trip, server);
-
+    /* Attaching the AioContext starts the vu_client_trip coroutine */
     aio_context_acquire(server->ctx);
     vhost_user_server_attach_aio_context(server, server->ctx);
     aio_context_release(server->ctx);
@@ -413,8 +421,24 @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
                            NULL, NULL, vu_fd_watch);
     }
 
-    assert(!server->in_qio_channel_yield);
-    aio_co_schedule(ctx, server->co_trip);
+    if (server->co_trip) {
+        /*
+         * The caller didn't fully shut down co_trip (this can happen on
+         * non-polling drains like in bdrv_graph_wrlock()). This is okay as long
+         * as it no longer tries to shut it down and we're guaranteed to still
+         * be in the same AioContext as before.
+         */
+        assert(!server->quiescing);
+        assert(qemu_coroutine_get_aio_context(server->co_trip) == ctx);
+        if (server->wake_on_ctx_attach) {
+            server->wake_on_ctx_attach = false;
+            aio_co_wake(server->co_trip);
+        }
+    } else {
+        server->co_trip = qemu_coroutine_create(vu_client_trip, server);
+        assert(!server->in_qio_channel_yield);
+        aio_co_schedule(ctx, server->co_trip);
+    }
 }
 
 /* Called with server->ctx acquired */