
[PULL,13/18] vhost_net: configure all host notifiers in a single MR transaction

Message ID 6166799f698512ab35e216b4ff4f7156f47923c6.1726062663.git.mst@redhat.com
State New, archived
Series [PULL,01/18] virtio: Allow .get_vhost() without vhost_started

Commit Message

Michael S. Tsirkin Sept. 11, 2024, 1:52 p.m. UTC
From: zuoboqun <zuoboqun@baidu.com>

This allows a vhost_net device with multiple virtqueues to batch the
setup of all its host notifiers. This significantly reduces the
vhost_net device starting and stopping time; in our case, for a VM with
192 vCPUs and 15 vhost-user-net devices (64 vqs per device), the time
spent enabling the notifiers drops from 630ms to 75ms and the time
spent disabling them drops from 441ms to 45ms.

Signed-off-by: zuoboqun <zuoboqun@baidu.com>
Message-Id: <20240816070835.8309-1-zuoboqun@baidu.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 include/hw/virtio/vhost.h |   4 +
 hw/net/vhost_net.c        | 155 +++++++++++++++++++++++++++++++++++---
 hw/virtio/vhost.c         |   6 +-
 3 files changed, 150 insertions(+), 15 deletions(-)
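
The core of the change below is to wrap all host notifier (ioeventfd)
assignments of a device in one memory region transaction, so that
address_space_update_ioeventfds() runs once at commit time rather than
once per virtqueue. The following condensed sketch shows that pattern for
a single vhost device; it assumes QEMU's internal headers, and the helper
name enable_notifiers_batched() is purely illustrative: the real code,
vhost_net_enable_notifiers() in the patch, iterates over every vhost
device backing the virtio-net device and unwinds more carefully on error.

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/memory.h"            /* memory_region_transaction_*() */
#include "hw/qdev-core.h"           /* BUS(), DEVICE(), qdev_get_parent_bus() */
#include "hw/virtio/virtio.h"       /* VirtIODevice */
#include "hw/virtio/virtio-bus.h"   /* virtio_bus_set_host_notifier() */

/* Illustrative helper, not part of the patch. */
static int enable_notifiers_batched(VirtIODevice *vdev, int vq_index, int nvqs)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    /*
     * One transaction around all assignments: the ioeventfd dispatch
     * state is rebuilt once on commit instead of once per queue, which
     * is what made enabling N notifiers quadratic in N.
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), vq_index + i, true);
        if (r < 0) {
            error_report("VQ %d notifier binding failed: %d", i, -r);
            /*
             * Commit what was queued, then unassign the notifiers that
             * were set. (The real code batches this teardown too and
             * calls virtio_bus_cleanup_host_notifier() afterwards.)
             */
            memory_region_transaction_commit();
            while (--i >= 0) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), vq_index + i,
                                             false);
            }
            return r;
        }
    }

    memory_region_transaction_commit();
    return 0;
}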

Patch

diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index d75faf46e9..c75be46c06 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -171,6 +171,10 @@  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
  */
 void vhost_dev_cleanup(struct vhost_dev *hdev);
 
+void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
+                                      VirtIODevice *vdev,
+                                      unsigned int nvqs);
+
 /**
  * vhost_dev_enable_notifiers() - enable event notifiers
  * @hdev: common vhost_dev structure
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index dedf9ad7c2..997aab0557 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -162,6 +162,135 @@  void vhost_net_save_acked_features(NetClientState *nc)
 #endif
 }
 
+static void vhost_net_disable_notifiers_nvhosts(VirtIODevice *dev,
+                NetClientState *ncs, int data_queue_pairs, int nvhosts)
+{
+    VirtIONet *n = VIRTIO_NET(dev);
+    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
+    struct vhost_net *net;
+    struct vhost_dev *hdev;
+    int r, i, j;
+    NetClientState *peer;
+
+    /*
+     * Batch all the host notifiers in a single transaction to avoid
+     * quadratic time complexity in address_space_update_ioeventfds().
+     */
+    memory_region_transaction_begin();
+
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queue_pairs);
+        }
+
+        net = get_vhost_net(peer);
+        hdev = &net->dev;
+        for (j = 0; j < hdev->nvqs; j++) {
+            r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
+                                             hdev->vq_index + j,
+                                             false);
+            if (r < 0) {
+                error_report("vhost %d VQ %d notifier cleanup failed: %d",
+                              i, j, -r);
+            }
+            assert(r >= 0);
+        }
+    }
+    /*
+     * The transaction expects the ioeventfds to be open when it
+     * commits. Do it now, before the cleanup loop.
+     */
+    memory_region_transaction_commit();
+
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queue_pairs);
+        }
+
+        net = get_vhost_net(peer);
+        hdev = &net->dev;
+        for (j = 0; j < hdev->nvqs; j++) {
+            virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus),
+                                             hdev->vq_index + j);
+        }
+        virtio_device_release_ioeventfd(dev);
+    }
+}
+
+static int vhost_net_enable_notifiers(VirtIODevice *dev,
+                NetClientState *ncs, int data_queue_pairs, int cvq)
+{
+    VirtIONet *n = VIRTIO_NET(dev);
+    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
+    int nvhosts = data_queue_pairs + cvq;
+    struct vhost_net *net;
+    struct vhost_dev *hdev;
+    int r, i, j;
+    NetClientState *peer;
+
+    /*
+     * Batch all the host notifiers in a single transaction to avoid
+     * quadratic time complexity in address_space_update_ioeventfds().
+     */
+    memory_region_transaction_begin();
+
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queue_pairs);
+        }
+
+        net = get_vhost_net(peer);
+        hdev = &net->dev;
+        /*
+         * We will pass the notifiers to the kernel, make sure that QEMU
+         * doesn't interfere.
+         */
+        r = virtio_device_grab_ioeventfd(dev);
+        if (r < 0) {
+            error_report("binding does not support host notifiers");
+            memory_region_transaction_commit();
+            goto fail_nvhosts;
+        }
+
+        for (j = 0; j < hdev->nvqs; j++) {
+            r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
+                                             hdev->vq_index + j,
+                                             true);
+            if (r < 0) {
+                error_report("vhost %d VQ %d notifier binding failed: %d",
+                              i, j, -r);
+                memory_region_transaction_commit();
+                vhost_dev_disable_notifiers_nvqs(hdev, dev, j);
+                goto fail_nvhosts;
+            }
+        }
+    }
+
+    memory_region_transaction_commit();
+
+    return 0;
+fail_nvhosts:
+    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, i);
+    return r;
+}
+
+/*
+ * Stop processing guest IO notifications in vhost.
+ * Start processing them in qemu.
+ */
+static void vhost_net_disable_notifiers(VirtIODevice *dev,
+                NetClientState *ncs, int data_queue_pairs, int cvq)
+{
+    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs,
+                                        data_queue_pairs + cvq);
+}
+
 static int vhost_net_get_fd(NetClientState *backend)
 {
     switch (backend->info->type) {
@@ -272,11 +401,6 @@  static int vhost_net_start_one(struct vhost_net *net,
         }
     }
 
-    r = vhost_dev_enable_notifiers(&net->dev, dev);
-    if (r < 0) {
-        goto fail_notifiers;
-    }
-
     r = vhost_dev_start(&net->dev, dev, false);
     if (r < 0) {
         goto fail_start;
@@ -328,8 +452,6 @@  fail:
     }
     vhost_dev_stop(&net->dev, dev, false);
 fail_start:
-    vhost_dev_disable_notifiers(&net->dev, dev);
-fail_notifiers:
     return r;
 }
 
@@ -351,7 +473,6 @@  static void vhost_net_stop_one(struct vhost_net *net,
     if (net->nc->info->stop) {
         net->nc->info->stop(net->nc);
     }
-    vhost_dev_disable_notifiers(&net->dev, dev);
 }
 
 int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
@@ -396,10 +517,16 @@  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         }
      }
 
+    r = vhost_net_enable_notifiers(dev, ncs, data_queue_pairs, cvq);
+    if (r < 0) {
+        error_report("Error enabling host notifiers: %d", -r);
+        goto err;
+    }
+
     r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
-        goto err;
+        goto err_host_notifiers;
     }
 
     for (i = 0; i < nvhosts; i++) {
@@ -414,19 +541,19 @@  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
             r = vhost_set_vring_enable(peer, peer->vring_enable);
 
             if (r < 0) {
-                goto err_start;
+                goto err_guest_notifiers;
             }
         }
 
         r = vhost_net_start_one(get_vhost_net(peer), dev);
         if (r < 0) {
-            goto err_start;
+            goto err_guest_notifiers;
         }
     }
 
     return 0;
 
-err_start:
+err_guest_notifiers:
     while (--i >= 0) {
         peer = qemu_get_peer(ncs, i < data_queue_pairs ?
                                   i : n->max_queue_pairs);
@@ -437,6 +564,8 @@  err_start:
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
         fflush(stderr);
     }
+err_host_notifiers:
+    vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
 err:
     return r;
 }
@@ -468,6 +597,8 @@  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
         fflush(stderr);
     }
     assert(r >= 0);
+
+    vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
 }
 
 void vhost_net_cleanup(struct vhost_net *net)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 06fc71746e..7c5ef81b55 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1682,9 +1682,9 @@  void vhost_dev_cleanup(struct vhost_dev *hdev)
     memset(hdev, 0, sizeof(struct vhost_dev));
 }
 
-static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
-                                             VirtIODevice *vdev,
-                                             unsigned int nvqs)
+void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
+                                      VirtIODevice *vdev,
+                                      unsigned int nvqs)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
     int i, r;
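
The hunk above only drops the static qualifier so that vhost_net can use
vhost_dev_disable_notifiers_nvqs() to unwind a partially configured
device; the helper's body is unchanged and not shown here. The teardown
side follows the same batching idea with one ordering subtlety, visible
in vhost_net_disable_notifiers_nvhosts() earlier in the patch: the
transaction has to be committed while the ioeventfds are still open, and
only afterwards may the notifiers be cleaned up and the ioeventfd grab
released. A condensed per-device sketch of that ordering (same includes
and caveats as the sketch above, helper name again illustrative):

/* Illustrative helper, not part of the patch. */
static void disable_notifiers_batched(VirtIODevice *vdev, int vq_index,
                                      int nvqs)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    /* Batch the unassignments, exactly like the enable path. */
    memory_region_transaction_begin();
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), vq_index + i,
                                         false);
        assert(r >= 0);
    }

    /*
     * The transaction expects the ioeventfds to still be open when it
     * commits, so commit first; only then close the notifiers and give
     * the ioeventfd back to QEMU.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}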