
[PATCH for-9.0 09/12] vdpa: approve switchover after memory map in the migration destination

Message ID 20231215172830.2540987-10-eperezma@redhat.com
State New, archived
Series Map memory at destination .load_setup in vDPA-net migration

Commit Message

Eugenio Perez Martin Dec. 15, 2023, 5:28 p.m. UTC
If the VM migrates before all the memory maps have finished, the source
stops but the destination is not yet ready to continue, and it must wait
until all guest RAM is mapped.  The destination can use switchover_ack
to prevent the source from stopping until all the memory has been mapped
at the destination.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 include/hw/virtio/vhost-vdpa.h |  4 ++++
 hw/virtio/vhost-vdpa.c         | 18 ++++++++++++++++++
 2 files changed, 22 insertions(+)
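
For context, the patch relies on QEMU's bottom-half (BH) API: migration
functions such as qemu_loadvm_approve_switchover() must run in the main
loop, so a worker thread hands off by scheduling a BH rather than calling
them directly.  A minimal sketch of that pattern, assuming nothing beyond
the APIs used in the patch; the worker_done_bh/worker_fn/spawn_sketch
names are illustrative, not taken from the series:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/thread.h"
#include "migration/savevm.h"

/* Illustrative BH callback: runs in the main loop once scheduled and
 * tells the migration core that this side is ready for switchover. */
static void worker_done_bh(void *opaque)
{
    qemu_loadvm_approve_switchover();
}

/* Illustrative worker: does the slow job off the main loop, then hands
 * control back by scheduling the bottom half, which is safe to do from
 * any thread. */
static void *worker_fn(void *opaque)
{
    QEMUBH *bh = opaque;

    /* ... long-running work, e.g. mapping guest RAM, goes here ... */

    qemu_bh_schedule(bh);
    return NULL;
}

/* Illustrative setup: create the BH in the main loop, then spawn the
 * worker that will eventually schedule it. */
static void spawn_sketch(void)
{
    QEMUBH *bh = qemu_bh_new(worker_done_bh, NULL);
    QemuThread thread;

    qemu_thread_create(&thread, "sketch worker", worker_fn, bh,
                       QEMU_THREAD_JOINABLE);
}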

Patch

diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index b49286b327..1c7e3fbd24 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -34,6 +34,7 @@ typedef struct VhostVDPAHostNotifier {
 typedef struct VhostVDPAMapThread {
     QemuThread thread;
     GAsyncQueue *queue;
+    QEMUBH *bh;
     bool map_thread_enabled;
 } VhostVDPAMapThread;
 
@@ -60,6 +61,9 @@ typedef struct vhost_vdpa_shared {
      * To solve it, offload the first listener operations until the first
      * listener commit from the main thread.  Once these are served, join the
      * map thread.
+     *
+     * This map thread is joined by join_map_thread BH if
+     * migrate_switchover_ack is supported, or by vhost_vdpa_dev_start if not.
      */
     VhostVDPAMapThread *map_thread;
 
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 339e11c58a..7d31f4a30e 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -22,6 +22,8 @@
 #include "hw/virtio/vhost-vdpa.h"
 #include "exec/address-spaces.h"
 #include "migration/blocker.h"
+#include "migration/options.h"
+#include "migration/savevm.h"
 #include "qemu/cutils.h"
 #include "qemu/main-loop.h"
 #include "trace.h"
@@ -1372,13 +1374,26 @@ static void *vhost_vdpa_load_map_worker(void *opaque)
     }
 
 end:
+    if (shared->map_thread->bh) {
+        qemu_bh_schedule(shared->map_thread->bh);
+    }
+
     return ret;
 }
 
+static void vhost_vdpa_load_map_switchover_ack(void *opaque)
+{
+    qemu_loadvm_approve_switchover();
+}
+
 static void vhost_vdpa_spawn_maps_thread(VhostVDPAShared *shared)
 {
     shared->map_thread = g_new0(VhostVDPAMapThread, 1);
     shared->map_thread->queue = g_async_queue_new();
+    if (migrate_switchover_ack()) {
+        shared->map_thread->bh = qemu_bh_new(vhost_vdpa_load_map_switchover_ack,
+                                             NULL);
+    }
     qemu_thread_create(&shared->map_thread->thread, "vdpa map thread",
                        vhost_vdpa_load_map_worker, shared,
                        QEMU_THREAD_JOINABLE);
@@ -1390,6 +1405,9 @@ static bool vhost_vdpa_join_maps_thread(VhostVDPAShared *shared)
     g_autoptr(GPtrArray) failed_iova = NULL;
 
     failed_iova = qemu_thread_join(&shared->map_thread->thread);
+    if (shared->map_thread->bh) {
+        qemu_bh_delete(shared->map_thread->bh);
+    }
     g_async_queue_unref(shared->map_thread->queue);
     g_clear_pointer(&shared->map_thread, g_free);
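
The header comment in the first hunk describes two completion paths for
the map thread.  A minimal sketch of how they differ, assuming the
behavior of the earlier patches in this series (map_thread_finish_sketch
is hypothetical glue, not code from the series):

/* Hypothetical summary of the two completion paths: with switchover-ack
 * negotiated, the worker schedules shared->map_thread->bh and the main
 * loop approves the switchover; without it, vhost_vdpa_dev_start joins
 * the thread synchronously before starting the device. */
static bool map_thread_finish_sketch(VhostVDPAShared *shared)
{
    if (migrate_switchover_ack()) {
        /* Asynchronous path: nothing to wait for here; the BH calls
         * qemu_loadvm_approve_switchover() once all maps are done. */
        return true;
    }

    /* Synchronous fallback: block until every map has been applied. */
    return vhost_vdpa_join_maps_thread(shared);
}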