diff mbox

[4/4] virtio-blk: Clean up start/stop with mutex and BH

Message ID 1458123018-18651-5-git-send-email-famz@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Fam Zheng March 16, 2016, 10:10 a.m. UTC
This is to make the dataplane start logic simpler to understand.

Start/stop take the mutex so we don't need the starting flag. The bottom
half is scheduled in the iothread to actually hook up request handlers
with vq.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
---
 hw/block/dataplane/virtio-blk.c | 58 +++++++++++++++++++++++++++++++----------
 1 file changed, 44 insertions(+), 14 deletions(-)

Comments

Stefan Hajnoczi March 17, 2016, 3 p.m. UTC | #1
On Wed, Mar 16, 2016 at 06:10:18PM +0800, Fam Zheng wrote:
> +    data = g_new(VirtIOBlockStartData, 1);
> +    data->vblk = vblk;
> +    data->bh = aio_bh_new(s->ctx, virtio_blk_data_plane_start_bh_cb, data);
> +    qemu_bh_schedule(data->bh);
> +    qemu_mutex_unlock(&s->start_stop_lock);
>      return;

This BH usage pattern is dangerous:

1. The BH is created and scheduled.
2. Before the BH executes the device is unrealized.
3. The data->bh pointer is inaccessible so we have a dangling BH that
   will access vblk after vblk has been freed.

In some cases it can be safe but I don't see why the pattern is safe in
this case.  Either the BH needs to hold some sort of reference to keep
vblk alive, or vblk needs to know about pending BHs so they can be
deleted.
Paolo Bonzini March 17, 2016, 3:07 p.m. UTC | #2
On 17/03/2016 16:00, Stefan Hajnoczi wrote:
>> > +    data = g_new(VirtIOBlockStartData, 1);
>> > +    data->vblk = vblk;
>> > +    data->bh = aio_bh_new(s->ctx, virtio_blk_data_plane_start_bh_cb, data);
>> > +    qemu_bh_schedule(data->bh);
>> > +    qemu_mutex_unlock(&s->start_stop_lock);
>> >      return;
> This BH usage pattern is dangerous:
> 
> 1. The BH is created and scheduled.
> 2. Before the BH executes the device is unrealized.
> 3. The data->bh pointer is inaccessible so we have a dangling BH that
>    will access vblk after vblk has been freed.
> 
> In some cases it can be safe but I don't see why the pattern is safe in
> this case.  Either the BH needs to hold some sort of reference to keep
> vblk alive, or vblk needs to know about pending BHs so they can be
> deleted.

You're right.  After unrealizing, virtio_blk_data_plane_stop has set
vblk->dataplane_started = false, so that's covered.  However, you still
need an object_ref/object_unref pair.

That said, Christian wasn't testing hotplug/hot-unplug so this shouldn't
matter in his case.  Let's see if we can catch the reentrancy with an
assertion...

Paolo
Fam Zheng March 22, 2016, 12:52 p.m. UTC | #3
On Thu, 03/17 16:07, Paolo Bonzini wrote:
> 
> 
> On 17/03/2016 16:00, Stefan Hajnoczi wrote:
> >> > +    data = g_new(VirtIOBlockStartData, 1);
> >> > +    data->vblk = vblk;
> >> > +    data->bh = aio_bh_new(s->ctx, virtio_blk_data_plane_start_bh_cb, data);
> >> > +    qemu_bh_schedule(data->bh);
> >> > +    qemu_mutex_unlock(&s->start_stop_lock);
> >> >      return;
> > This BH usage pattern is dangerous:
> > 
> > 1. The BH is created and scheduled.
> > 2. Before the BH executes the device is unrealized.
> > 3. The data->bh pointer is inaccessible so we have a dangling BH that
> >    will access vblk after vblk has been freed.
> > 
> > In some cases it can be safe but I don't see why the pattern is safe in
> > this case.  Either the BH needs to hold some sort of reference to keep
> > vblk alive, or vblk needs to know about pending BHs so they can be
> > deleted.
> 
> You're right.  After unrealizing, virtio_blk_data_plane_stop has set
> vblk->dataplane_started = false, so that's covered.  However, you still
> need an object_ref/object_unref pair.

Is it safe to call object_unref outside BQL?

Fam
diff mbox

Patch

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 36f3d2b..9e5c543 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -26,7 +26,6 @@ 
 #include "qom/object_interfaces.h"
 
 struct VirtIOBlockDataPlane {
-    bool starting;
     bool stopping;
     bool disabled;
 
@@ -49,6 +48,8 @@  struct VirtIOBlockDataPlane {
 
     /* Operation blocker on BDS */
     Error *blocker;
+
+    QemuMutex start_stop_lock;
 };
 
 /* Raise an interrupt to signal guest, if necessary */
@@ -150,6 +151,7 @@  void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
     s = g_new0(VirtIOBlockDataPlane, 1);
     s->vdev = vdev;
     s->conf = conf;
+    qemu_mutex_init(&s->start_stop_lock);
 
     if (conf->iothread) {
         s->iothread = conf->iothread;
@@ -184,19 +186,47 @@  void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     g_free(s);
 }
 
+typedef struct {
+    VirtIOBlock *vblk;
+    QEMUBH *bh;
+} VirtIOBlockStartData;
+
+static void virtio_blk_data_plane_start_bh_cb(void *opaque)
+{
+    VirtIOBlockStartData *data = opaque;
+    VirtIOBlockDataPlane *s = data->vblk->dataplane;
+
+    qemu_mutex_lock(&s->start_stop_lock);
+    if (!data->vblk->dataplane_started) {
+        goto out;
+    }
+    /* Kick right away to begin processing requests already in vring */
+    event_notifier_set(virtio_queue_get_host_notifier(s->vq));
+
+    /* Get this show started by hooking up our callbacks */
+    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, true);
+
+out:
+    qemu_bh_delete(data->bh);
+    g_free(data);
+    qemu_mutex_unlock(&s->start_stop_lock);
+}
+
 /* Context: QEMU global mutex held */
 void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
+    VirtIOBlockStartData *data;
     int r;
 
-    if (vblk->dataplane_started || s->starting) {
+    qemu_mutex_lock(&s->start_stop_lock);
+    if (vblk->dataplane_started) {
+        qemu_mutex_unlock(&s->start_stop_lock);
         return;
     }
 
-    s->starting = true;
     s->vq = virtio_get_queue(s->vdev, 0);
 
     /* Set up guest notifier (irq) */
@@ -215,27 +245,24 @@  void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
         goto fail_host_notifier;
     }
 
-    s->starting = false;
     vblk->dataplane_started = true;
     trace_virtio_blk_data_plane_start(s);
 
     blk_set_aio_context(s->conf->conf.blk, s->ctx);
 
-    /* Kick right away to begin processing requests already in vring */
-    event_notifier_set(virtio_queue_get_host_notifier(s->vq));
-
-    /* Get this show started by hooking up our callbacks */
-    aio_context_acquire(s->ctx);
-    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, true);
-    aio_context_release(s->ctx);
+    data = g_new(VirtIOBlockStartData, 1);
+    data->vblk = vblk;
+    data->bh = aio_bh_new(s->ctx, virtio_blk_data_plane_start_bh_cb, data);
+    qemu_bh_schedule(data->bh);
+    qemu_mutex_unlock(&s->start_stop_lock);
     return;
 
   fail_host_notifier:
     k->set_guest_notifiers(qbus->parent, 1, false);
   fail_guest_notifiers:
     s->disabled = true;
-    s->starting = false;
     vblk->dataplane_started = true;
+    qemu_mutex_unlock(&s->start_stop_lock);
 }
 
 /* Context: QEMU global mutex held */
@@ -245,15 +272,16 @@  void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
 
+    qemu_mutex_lock(&s->start_stop_lock);
     if (!vblk->dataplane_started || s->stopping) {
-        return;
+        goto out;
     }
 
     /* Better luck next time. */
     if (s->disabled) {
         s->disabled = false;
         vblk->dataplane_started = false;
-        return;
+        goto out;
     }
     s->stopping = true;
     trace_virtio_blk_data_plane_stop(s);
@@ -275,4 +303,6 @@  void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
 
     vblk->dataplane_started = false;
     s->stopping = false;
+out:
+    qemu_mutex_unlock(&s->start_stop_lock);
 }