@@ -349,6 +349,23 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
vhost_dev_cleanup(&s->dev);
}
+static void vhost_user_blk_event(void *opaque, QEMUChrEvent event);
+
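+/*
+ * Bottom half scheduled from the CHR_EVENT_CLOSED handler: perform the
+ * deferred disconnect and re-install the chardev event handler that was
+ * removed in vhost_user_blk_event().
+ */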
+static void vhost_user_blk_chr_closed_bh(void *opaque)
+{
+ DeviceState *dev = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserBlk *s = VHOST_USER_BLK(vdev);
+
+ vhost_user_blk_disconnect(dev);
+ qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
+ NULL, opaque, NULL, true);
+}
+
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
{
DeviceState *dev = opaque;
@@ -363,7 +380,31 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
- vhost_user_blk_disconnect(dev);
+ /*
+ * A close event may happen during a read/write, but vhost
+ * code assumes the vhost_dev remains setup, so delay the
+ * stop & clear. There are two possible paths to hit this
+ * disconnect event:
+ * 1. When the VM is in the RUN_STATE_PRELAUNCH state, i.e.
+ * vhost_user_blk_device_realize() is the caller.
+ * 2. In the main loop phase, after the VM has started.
+ *
+ * In case 2 the disconnect is deferred to a bottom half. We
+ * can't do the same in case 1, because the main loop is not
+ * running yet, so skip this step here and perform the
+ * disconnect in the caller instead.
+ *
+ * TODO: it may be worth applying the same fix to other
+ * vhost-user devices as well.
+ */
+ if (runstate_is_running()) {
+ AioContext *ctx = qemu_get_current_aio_context();
+
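+ /* Detach the chardev event handler; the bottom half restores it. */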
+ qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
+ NULL, NULL, false);
+ aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);
+ }
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN: