diff mbox series

[RFC,v3,12/29] vhost: add vhost_kernel_vring_pause

Message ID 20210519162903.1172366-13-eperezma@redhat.com (mailing list archive)
State New, archived
Headers show
Series vDPA software assisted live migration | expand

Commit Message

Eugenio Perez Martin May 19, 2021, 4:28 p.m. UTC
This is just a commit to allow the testing with vhost-net, not intended
for the final version or any other device.

vhost_kernel_vring_pause stops the device, so qemu can query its status
(the next available idx the device was going to consume) and replace the
vring addresses. When SVQ starts, it can resume consuming the guest's
driver ring without the guest noticing. Not stopping the device before
swapping the addresses could mean that it processes more buffers than
reported, which would duplicate the device's actions.

This mimics vhost-vdpa behavior, where vhost_kernel_start is intended to
resume the device. In vhost-vdpa it performs a full reset. Since this is
a temporary commit to allow testing with vhost-net, here it just sets a
new backend, which is enough for vhost-net to notice the new vring
addresses.

Signed-off-by: Eugenio PĂ©rez <eperezma@redhat.com>
---
 hw/virtio/vhost-backend.c | 42 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
diff mbox series

Patch

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 31b33bde37..9653b7fddb 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -201,6 +201,46 @@  static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
     return idx - dev->vq_index;
 }
 
+static int vhost_kernel_set_vq_pause(struct vhost_dev *dev, unsigned idx,
+                                     bool pause)
+{
+    struct vhost_vring_file file = {
+        .index = idx,
+    };
+
+    if (pause) {
+        file.fd = -1; /* Pass -1 to unbind from file. */
+    } else {
+        struct vhost_net *vn_dev = container_of(dev, struct vhost_net, dev);
+        file.fd = vn_dev->backend;
+    }
+
+    return vhost_kernel_net_set_backend(dev, &file);
+}
+
+static int vhost_kernel_vring_pause(struct vhost_dev *dev)
+{
+    int i;
+
+    for (i = 0; i < dev->nvqs; ++i) {
+        vhost_kernel_set_vq_pause(dev, i, true);
+    }
+
+    return 0;
+}
+
+static int vhost_kernel_start(struct vhost_dev *dev, bool start)
+{
+    int i;
+
+    assert(start);
+    for (i = 0; i < dev->nvqs; ++i) {
+        vhost_kernel_set_vq_pause(dev, i, false);
+    }
+
+    return 0;
+}
+
 #ifdef CONFIG_VHOST_VSOCK
 static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                             uint64_t guest_cid)
@@ -317,6 +357,8 @@  static const VhostOps kernel_ops = {
         .vhost_set_owner = vhost_kernel_set_owner,
         .vhost_reset_device = vhost_kernel_reset_device,
         .vhost_get_vq_index = vhost_kernel_get_vq_index,
+        .vhost_dev_start = vhost_kernel_start,
+        .vhost_vring_pause = vhost_kernel_vring_pause,
 #ifdef CONFIG_VHOST_VSOCK
         .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
         .vhost_vsock_set_running = vhost_kernel_vsock_set_running,