
[PULL,11/71] nbd: Restrict connection_co reentrance

Message ID: 20190225152053.15976-12-kwolf@redhat.com (mailing list archive)
State: New, archived
Series: [PULL,01/71] MAINTAINERS: Replace myself with John Snow for block jobs

Commit Message

Kevin Wolf Feb. 25, 2019, 3:19 p.m. UTC
nbd_client_attach_aio_context() schedules connection_co in the new
AioContext, and this way reenters it at whatever arbitrary point it
happens to have yielded. We can restrict this to the one function call
where the coroutine actually sits waiting while it is idle.
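
For illustration, a minimal sketch of this reentrance mechanism,
assuming QEMU's coroutine/AioContext API; demo_entry and demo_move are
hypothetical stand-ins, not part of the patch:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/aio.h"

    /* Hypothetical coroutine that parks itself at one yield point. */
    static coroutine_fn void demo_entry(void *opaque)
    {
        /* The coroutine sits here until someone reenters it ... */
        qemu_coroutine_yield();
        /* ... and aio_co_schedule() resumes it exactly at this point,
         * from the event loop of the AioContext it was scheduled into. */
    }

    /* The operation nbd_client_attach_aio_context() performs: schedule
     * the coroutine in new_context, reentering it wherever it last
     * yielded. */
    static void demo_move(Coroutine *co, AioContext *new_context)
    {
        aio_co_schedule(new_context, co);
    }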

This doesn't solve any bug yet, but it shows where in the code we need
to support this random reentrance and where we don't have to care.

Add FIXME comments for the existing bugs that the rest of this series
will fix.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
---
 block/nbd-client.h |  1 +
 block/nbd-client.c | 22 ++++++++++++++++++++++
 2 files changed, 23 insertions(+)
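
The patch below relies on the block layer's in-flight request counter:
bdrv_drain() polls until a BlockDriverState's in_flight count reaches
zero, so holding a reference keeps a drain from completing. A minimal
sketch of the pairing rule, assuming QEMU's block layer internals
(do_background_work is a hypothetical helper):

    #include "qemu/osdep.h"
    #include "block/block_int.h"

    /* Hypothetical helper: background activity that drain must wait
     * for. The invariant the patch depends on: every
     * bdrv_inc_in_flight() is paired with exactly one
     * bdrv_dec_in_flight(). */
    static void do_background_work(BlockDriverState *bs)
    {
        bdrv_inc_in_flight(bs);   /* a pending drain now waits for us */
        /* ... work that must not race with drain ... */
        bdrv_dec_in_flight(bs);   /* may let a pending drain finish */
    }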

Patch

diff --git a/block/nbd-client.h b/block/nbd-client.h
index d990207a5c..09e03013d2 100644
--- a/block/nbd-client.h
+++ b/block/nbd-client.h
@@ -35,6 +35,7 @@  typedef struct NBDClientSession {
 
     NBDClientRequest requests[MAX_NBD_REQUESTS];
     NBDReply reply;
+    BlockDriverState *bs;
     bool quit;
 } NBDClientSession;
 
diff --git a/block/nbd-client.c b/block/nbd-client.c
index f0ad54ce21..5ce4aa9520 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -76,8 +76,24 @@  static coroutine_fn void nbd_connection_entry(void *opaque)
     Error *local_err = NULL;
 
     while (!s->quit) {
+        /*
+         * The NBD client can only really be considered idle when it has
+         * yielded from qio_channel_readv_all_eof(), waiting for data. This is
+         * the point where the additional scheduled coroutine entry happens
+         * after nbd_client_attach_aio_context().
+         *
+         * Therefore we keep an additional in_flight reference all the time and
+         * only drop it temporarily here.
+         *
+         * FIXME This is not safe because the QIOChannel could wake up the
+         * coroutine for a second time; it is not prepared for coroutine
+         * resumption from external code.
+         */
+        bdrv_dec_in_flight(s->bs);
         assert(s->reply.handle == 0);
         ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
+        bdrv_inc_in_flight(s->bs);
+
         if (local_err) {
             trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
             error_free(local_err);
@@ -116,6 +132,8 @@  static coroutine_fn void nbd_connection_entry(void *opaque)
 
     s->quit = true;
     nbd_recv_coroutines_wake_all(s);
+    bdrv_dec_in_flight(s->bs);
+
     s->connection_co = NULL;
     aio_wait_kick();
 }
@@ -970,6 +988,8 @@  void nbd_client_attach_aio_context(BlockDriverState *bs,
 {
     NBDClientSession *client = nbd_get_client_session(bs);
     qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
+
+    /* FIXME Really need a bdrv_inc_in_flight() here */
     aio_co_schedule(new_context, client->connection_co);
 }
 
@@ -1076,6 +1096,7 @@  static int nbd_client_connect(BlockDriverState *bs,
      * kick the reply mechanism.  */
     qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
     client->connection_co = qemu_coroutine_create(nbd_connection_entry, client);
+    bdrv_inc_in_flight(bs);
     nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
     logout("Established connection with NBD server\n");
@@ -1108,6 +1129,7 @@  int nbd_client_init(BlockDriverState *bs,
 {
     NBDClientSession *client = nbd_get_client_session(bs);
 
+    client->bs = bs;
     qemu_co_mutex_init(&client->send_mutex);
     qemu_co_queue_init(&client->free_sema);
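
As the FIXME in nbd_connection_entry() notes, the coroutine is not yet
prepared for a spurious second wakeup from the QIOChannel; the rest of
this series fixes that. For comparison, one common defensive idiom
(sketched here with a hypothetical data_ready() predicate, not what the
series actually does) is to recheck the wait condition after every
yield rather than assume each reentry corresponds to exactly one event:

    /* Hypothetical defensive pattern against spurious wakeups: loop on
     * the condition instead of yielding exactly once. */
    while (!data_ready(s) && !s->quit) {
        qemu_coroutine_yield();
    }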