From patchwork Fri Dec 28 10:31:58 2012 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jason Wang X-Patchwork-Id: 1914901 Return-Path: X-Original-To: patchwork-kvm@patchwork.kernel.org Delivered-To: patchwork-process-083081@patchwork2.kernel.org Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by patchwork2.kernel.org (Postfix) with ESMTP id 07D0CE00D8 for ; Fri, 28 Dec 2012 10:41:41 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1753448Ab2L1Klf (ORCPT ); Fri, 28 Dec 2012 05:41:35 -0500 Received: from mx1.redhat.com ([209.132.183.28]:61628 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753100Ab2L1Kld (ORCPT ); Fri, 28 Dec 2012 05:41:33 -0500 Received: from int-mx10.intmail.prod.int.phx2.redhat.com (int-mx10.intmail.prod.int.phx2.redhat.com [10.5.11.23]) by mx1.redhat.com (8.14.4/8.14.4) with ESMTP id qBSAfOYa027657 (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK); Fri, 28 Dec 2012 05:41:25 -0500 Received: from amd-6168-8-1.englab.nay.redhat.com (amd-6168-8-1.englab.nay.redhat.com [10.66.104.52]) by int-mx10.intmail.prod.int.phx2.redhat.com (8.14.4/8.14.4) with ESMTP id qBSAeCRP012168; Fri, 28 Dec 2012 05:41:20 -0500 From: Jason Wang To: mst@redhat.com, aliguori@us.ibm.com, stefanha@redhat.com, qemu-devel@nongnu.org Cc: rusty@rustcorp.com.au, kvm@vger.kernel.org, mprivozn@redhat.com, shiyer@redhat.com, krkumar2@in.ibm.com, jwhan@filewood.snu.ac.kr, Jason Wang Subject: [PATCH 06/12] vhost: multiqueue support Date: Fri, 28 Dec 2012 18:31:58 +0800 Message-Id: <1356690724-37891-7-git-send-email-jasowang@redhat.com> In-Reply-To: <1356690724-37891-1-git-send-email-jasowang@redhat.com> References: <1356690724-37891-1-git-send-email-jasowang@redhat.com> X-Scanned-By: MIMEDefang 2.68 on 10.5.11.23 Sender: kvm-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org This 
patch lets vhost support multiqueue. The idea is simple: launch multiple vhost threads and let each vhost thread process a subset of the device's virtqueues. The only thing needed is to pass a virtqueue index when starting the vhost device; this is used to track the first virtqueue which this vhost thread serves. Signed-off-by: Jason Wang --- hw/vhost.c | 52 +++++++++++++++++++++++++++++++++------------------- hw/vhost.h | 2 ++ hw/vhost_net.c | 7 +++++-- hw/vhost_net.h | 2 +- hw/virtio-net.c | 3 ++- 5 files changed, 43 insertions(+), 23 deletions(-) diff --git a/hw/vhost.c b/hw/vhost.c index 16322a1..63c76d6 100644 --- a/hw/vhost.c +++ b/hw/vhost.c @@ -619,11 +619,12 @@ static int vhost_virtqueue_init(struct vhost_dev *dev, { hwaddr s, l, a; int r; + int vhost_vq_index = idx % dev->nvqs; struct vhost_vring_file file = { - .index = idx, + .index = vhost_vq_index }; struct vhost_vring_state state = { - .index = idx, + .index = vhost_vq_index }; struct VirtQueue *vvq = virtio_get_queue(vdev, idx); @@ -669,11 +670,12 @@ static int vhost_virtqueue_init(struct vhost_dev *dev, goto fail_alloc_ring; } - r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled); + r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); if (r < 0) { r = -errno; goto fail_alloc; } + file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq)); r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file); if (r) { @@ -714,7 +716,7 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev, unsigned idx) { struct vhost_vring_state state = { - .index = idx, + .index = idx % dev->nvqs, }; int r; r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state); @@ -829,7 +831,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) } for (i = 0; i < hdev->nvqs; ++i) { - r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true); + r = vdev->binding->set_host_notifier(vdev->binding_opaque, + hdev->vq_index + i, + true); if (r < 0) { 
fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r); goto fail_vq; @@ -839,7 +843,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) return 0; fail_vq: while (--i >= 0) { - r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false); + r = vdev->binding->set_host_notifier(vdev->binding_opaque, + hdev->vq_index + i, + false); if (r < 0) { fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r); fflush(stderr); @@ -860,7 +866,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) int i, r; for (i = 0; i < hdev->nvqs; ++i) { - r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false); + r = vdev->binding->set_host_notifier(vdev->binding_opaque, + hdev->vq_index + i, + false); if (r < 0) { fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r); fflush(stderr); @@ -879,10 +887,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) goto fail; } - r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true); - if (r < 0) { - fprintf(stderr, "Error binding guest notifier: %d\n", -r); - goto fail_notifiers; + if (hdev->vq_index == 0) { + r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true); + if (r < 0) { + fprintf(stderr, "Error binding guest notifier: %d\n", -r); + goto fail_notifiers; + } } r = vhost_dev_set_features(hdev, hdev->log_enabled); @@ -898,7 +908,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) r = vhost_virtqueue_init(hdev, vdev, hdev->vqs + i, - i); + hdev->vq_index + i); if (r < 0) { goto fail_vq; } @@ -925,8 +935,9 @@ fail_vq: vhost_virtqueue_cleanup(hdev, vdev, hdev->vqs + i, - i); + hdev->vq_index + i); } + i = hdev->nvqs; fail_mem: fail_features: vdev->binding->set_guest_notifiers(vdev->binding_opaque, false); @@ -944,21 +955,24 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) vhost_virtqueue_cleanup(hdev, vdev, hdev->vqs + i, - i); + hdev->vq_index + i); } for (i = 
0; i < hdev->n_mem_sections; ++i) { vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i], 0, (hwaddr)~0x0ull); } - r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false); - if (r < 0) { - fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r); - fflush(stderr); + if (hdev->vq_index == 0) { + r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false); + if (r < 0) { + fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r); + fflush(stderr); + } + assert (r>= 0); } - assert (r >= 0); hdev->started = false; g_free(hdev->log); hdev->log = NULL; hdev->log_size = 0; } + diff --git a/hw/vhost.h b/hw/vhost.h index 0c47229..e94a9f7 100644 --- a/hw/vhost.h +++ b/hw/vhost.h @@ -34,6 +34,8 @@ struct vhost_dev { MemoryRegionSection *mem_sections; struct vhost_virtqueue *vqs; int nvqs; + /* the first virtqueue which would be used by this vhost dev */ + int vq_index; unsigned long long features; unsigned long long acked_features; unsigned long long backend_features; diff --git a/hw/vhost_net.c b/hw/vhost_net.c index 8241601..cdb294c 100644 --- a/hw/vhost_net.c +++ b/hw/vhost_net.c @@ -138,13 +138,15 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev) } int vhost_net_start(struct vhost_net *net, - VirtIODevice *dev) + VirtIODevice *dev, + int vq_index) { struct vhost_vring_file file = { }; int r; net->dev.nvqs = 2; net->dev.vqs = net->vqs; + net->dev.vq_index = vq_index; r = vhost_dev_enable_notifiers(&net->dev, dev); if (r < 0) { @@ -214,7 +216,8 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev) } int vhost_net_start(struct vhost_net *net, - VirtIODevice *dev) + VirtIODevice *dev, + int vq_index) { return -ENOSYS; } diff --git a/hw/vhost_net.h b/hw/vhost_net.h index a9db234..c9a8429 100644 --- a/hw/vhost_net.h +++ b/hw/vhost_net.h @@ -9,7 +9,7 @@ typedef struct vhost_net VHostNetState; VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force); bool vhost_net_query(VHostNetState *net, VirtIODevice 
*dev); -int vhost_net_start(VHostNetState *net, VirtIODevice *dev); +int vhost_net_start(VHostNetState *net, VirtIODevice *dev, int vq_index); void vhost_net_stop(VHostNetState *net, VirtIODevice *dev); void vhost_net_cleanup(VHostNetState *net); diff --git a/hw/virtio-net.c b/hw/virtio-net.c index d57a5a5..70bc0e6 100644 --- a/hw/virtio-net.c +++ b/hw/virtio-net.c @@ -126,7 +126,8 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) if (!vhost_net_query(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev)) { return; } - r = vhost_net_start(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev); + r = vhost_net_start(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), + &n->vdev, 0); if (r < 0) { error_report("unable to start vhost net: %d: " "falling back on userspace virtio", -r);