diff mbox

[5/9] virtio-blk: multiqueue batch notify

Message ID 1463787632-7241-6-git-send-email-stefanha@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Stefan Hajnoczi May 20, 2016, 11:40 p.m. UTC
The batch notification BH needs to know which virtqueues to notify when
multiqueue is enabled.  Use a bitmap to track the virtqueues with
pending notifications.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/virtio-blk.c          | 20 ++++++++++++++++----
 include/hw/virtio/virtio-blk.h |  1 +
 2 files changed, 17 insertions(+), 4 deletions(-)

Comments

Paolo Bonzini May 21, 2016, 4:02 p.m. UTC | #1
On 21/05/2016 01:40, Stefan Hajnoczi wrote:
> +    while ((i = find_next_bit(s->batch_notify_vqs, nvqs, i)) < nvqs) {
> +        VirtQueue *vq = virtio_get_queue(vdev, i);
> +
> +        bitmap_clear(s->batch_notify_vqs, i, 1);

clear_bit?

> +        if (s->dataplane_started && !s->dataplane_disabled) {
> +            virtio_blk_data_plane_notify(s->dataplane, vq);
> +        } else {
> +            virtio_notify(vdev, vq);
> +        }

The find_next_bit loop is not very efficient and could use something
similar to commit 41074f3 ("omap_intc: convert ffs(3) to ctz32() in
omap_inth_sir_update()", 2015-04-28).  But it can be improved later.

Thanks,

Paolo
Fam Zheng May 23, 2016, 2:43 a.m. UTC | #2
On Fri, 05/20 16:40, Stefan Hajnoczi wrote:
> The batch notification BH needs to know which virtqueues to notify when
> multiqueue is enabled.  Use a bitmap to track the virtqueues with
> pending notifications.

This approach works great as long as VQs are in the same iothread. An
alternative way would be using a separate BH per VQ, which will naturally work with
multi queue block layer in the future.  Should we just go for that in the first
place?  Seems less code churn, and no imaginable disadvantage compared to this
patch.

Fam

> 
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  hw/block/virtio-blk.c          | 20 ++++++++++++++++----
>  include/hw/virtio/virtio-blk.h |  1 +
>  2 files changed, 17 insertions(+), 4 deletions(-)
> 
> diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
> index d16a1b7..ab0f589 100644
> --- a/hw/block/virtio-blk.c
> +++ b/hw/block/virtio-blk.c
> @@ -54,11 +54,19 @@ static void virtio_blk_batch_notify_bh(void *opaque)
>  {
>      VirtIOBlock *s = opaque;
>      VirtIODevice *vdev = VIRTIO_DEVICE(s);
> +    unsigned nvqs = s->conf.num_queues;
> +    unsigned i = 0;
>  
> -    if (s->dataplane_started && !s->dataplane_disabled) {
> -        virtio_blk_data_plane_notify(s->dataplane, s->vq);
> -    } else {
> -        virtio_notify(vdev, s->vq);
> +    while ((i = find_next_bit(s->batch_notify_vqs, nvqs, i)) < nvqs) {
> +        VirtQueue *vq = virtio_get_queue(vdev, i);
> +
> +        bitmap_clear(s->batch_notify_vqs, i, 1);
> +
> +        if (s->dataplane_started && !s->dataplane_disabled) {
> +            virtio_blk_data_plane_notify(s->dataplane, vq);
> +        } else {
> +            virtio_notify(vdev, vq);
> +        }
>      }
>  }
>  
> @@ -77,6 +85,8 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
>  
>      stb_p(&req->in->status, status);
>      virtqueue_push(req->vq, &req->elem, req->in_len);
> +
> +    bitmap_set(s->batch_notify_vqs, virtio_queue_get_id(req->vq), 1);
>      qemu_bh_schedule(s->batch_notify_bh);
>  }
>  
> @@ -922,6 +932,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
>  
>      s->batch_notify_bh = aio_bh_new(blk_get_aio_context(s->blk),
>                                      virtio_blk_batch_notify_bh, s);
> +    s->batch_notify_vqs = bitmap_new(conf->num_queues);
>  
>      s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
>      register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
> @@ -938,6 +949,7 @@ static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
>      VirtIOBlock *s = VIRTIO_BLK(dev);
>  
>      qemu_bh_delete(s->batch_notify_bh);
> +    g_free(s->batch_notify_vqs);
>      virtio_blk_data_plane_destroy(s->dataplane);
>      s->dataplane = NULL;
>      qemu_del_vm_change_state_handler(s->change);
> diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
> index 487b28d..b6e7860 100644
> --- a/include/hw/virtio/virtio-blk.h
> +++ b/include/hw/virtio/virtio-blk.h
> @@ -51,6 +51,7 @@ typedef struct VirtIOBlock {
>      void *rq;
>      QEMUBH *bh;
>      QEMUBH *batch_notify_bh;
> +    unsigned long *batch_notify_vqs;
>      VirtIOBlkConf conf;
>      unsigned short sector_mask;
>      bool original_wce;
> -- 
> 2.5.5
>
Paolo Bonzini May 23, 2016, 8:17 a.m. UTC | #3
On 23/05/2016 04:43, Fam Zheng wrote:
> > The batch notification BH needs to know which virtqueues to notify when
> > multiqueue is enabled.  Use a bitmap to track the virtqueues with
> > pending notifications.
> 
> This approach works great as long as VQs are in the same iothread. An
> alternative way would be using a separate BH per VQ, which will naturally work with
> multi queue block layer in the future.  Should we just go for that in the first
> place?  Seems less code churn, and no imaginable disadvantage compared to this
> patch.

It can be slower because all BHs are walked during aio_bh_poll, not just
the scheduled ones.

Paolo
Fam Zheng May 23, 2016, 8:56 a.m. UTC | #4
On Mon, 05/23 10:17, Paolo Bonzini wrote:
> 
> 
> On 23/05/2016 04:43, Fam Zheng wrote:
> > > The batch notification BH needs to know which virtqueues to notify when
> > > multiqueue is enabled.  Use a bitmap to track the virtqueues with
> > > pending notifications.
> > 
> > This approach works great as long as VQs are in the same iothread. An
> > alternative way would be using a separate BH per VQ, which will naturally work with
> > multi queue block layer in the future.  Should we just go for that in the first
> > place?  Seems less code churn, and no imaginable disadvantage compared to this
> > patch.
> 
> It can be slower because all BHs are walked during aio_bh_poll, not just
> the scheduled ones.
> 

OK, this is a fair point. Thanks.

Fam
Stefan Hajnoczi May 27, 2016, 9:38 p.m. UTC | #5
On Sat, May 21, 2016 at 06:02:52PM +0200, Paolo Bonzini wrote:
> 
> 
> On 21/05/2016 01:40, Stefan Hajnoczi wrote:
> > +    while ((i = find_next_bit(s->batch_notify_vqs, nvqs, i)) < nvqs) {
> > +        VirtQueue *vq = virtio_get_queue(vdev, i);
> > +
> > +        bitmap_clear(s->batch_notify_vqs, i, 1);
> 
> clear_bit?

Ignorance on my part.  Thanks!

> > +        if (s->dataplane_started && !s->dataplane_disabled) {
> > +            virtio_blk_data_plane_notify(s->dataplane, vq);
> > +        } else {
> > +            virtio_notify(vdev, vq);
> > +        }
> 
> The find_next_bit loop is not very efficient and could use something
> similar to commit 41074f3 ("omap_intc: convert ffs(3) to ctz32() in
> omap_inth_sir_update()", 2015-04-28).  But it can be improved later.

Cool, will try that for inspiration in v2.

Stefan
diff mbox

Patch

diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index d16a1b7..ab0f589 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -54,11 +54,19 @@  static void virtio_blk_batch_notify_bh(void *opaque)
 {
     VirtIOBlock *s = opaque;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
+    unsigned nvqs = s->conf.num_queues;
+    unsigned i = 0;
 
-    if (s->dataplane_started && !s->dataplane_disabled) {
-        virtio_blk_data_plane_notify(s->dataplane, s->vq);
-    } else {
-        virtio_notify(vdev, s->vq);
+    while ((i = find_next_bit(s->batch_notify_vqs, nvqs, i)) < nvqs) {
+        VirtQueue *vq = virtio_get_queue(vdev, i);
+
+        bitmap_clear(s->batch_notify_vqs, i, 1);
+
+        if (s->dataplane_started && !s->dataplane_disabled) {
+            virtio_blk_data_plane_notify(s->dataplane, vq);
+        } else {
+            virtio_notify(vdev, vq);
+        }
     }
 }
 
@@ -77,6 +85,8 @@  static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
 
     stb_p(&req->in->status, status);
     virtqueue_push(req->vq, &req->elem, req->in_len);
+
+    bitmap_set(s->batch_notify_vqs, virtio_queue_get_id(req->vq), 1);
     qemu_bh_schedule(s->batch_notify_bh);
 }
 
@@ -922,6 +932,7 @@  static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
 
     s->batch_notify_bh = aio_bh_new(blk_get_aio_context(s->blk),
                                     virtio_blk_batch_notify_bh, s);
+    s->batch_notify_vqs = bitmap_new(conf->num_queues);
 
     s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
     register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
@@ -938,6 +949,7 @@  static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
     VirtIOBlock *s = VIRTIO_BLK(dev);
 
     qemu_bh_delete(s->batch_notify_bh);
+    g_free(s->batch_notify_vqs);
     virtio_blk_data_plane_destroy(s->dataplane);
     s->dataplane = NULL;
     qemu_del_vm_change_state_handler(s->change);
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 487b28d..b6e7860 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -51,6 +51,7 @@  typedef struct VirtIOBlock {
     void *rq;
     QEMUBH *bh;
     QEMUBH *batch_notify_bh;
+    unsigned long *batch_notify_vqs;
     VirtIOBlkConf conf;
     unsigned short sector_mask;
     bool original_wce;