diff mbox series

[RFC,2/2] migration: Avoid doing RAMBlock dirty sync in the initial iteration

Message ID 76f0ea57ac7f4c2a68203d17fec1c34bb3d16a4a.1729648695.git.yong.huang@smartx.com (mailing list archive)
State New
Headers show
Series migration: Skip sync in ram_init_bitmaps() | expand

Commit Message

Yong Huang Oct. 23, 2024, 2:09 a.m. UTC
From: Hyman Huang <yong.huang@smartx.com>

When KVM_DIRTY_LOG_INITIALLY_SET is enabled, KVM returns a dirty
bitmap with all bits set the first time userspace retrieves it; in
such a scenario, the RAMBlock dirty sync of the initial iteration
can be skipped.

Signed-off-by: Hyman Huang <yong.huang@smartx.com>
---
 migration/cpu-throttle.c |  3 ++-
 migration/ram.c          | 30 +++++++++++++++++++++++++++---
 2 files changed, 29 insertions(+), 4 deletions(-)
diff mbox series

Patch

diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
index 342681cdd4..06e3b1be78 100644
--- a/migration/cpu-throttle.c
+++ b/migration/cpu-throttle.c
@@ -27,6 +27,7 @@ 
 #include "hw/core/cpu.h"
 #include "qemu/main-loop.h"
 #include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
 #include "cpu-throttle.h"
 #include "migration.h"
 #include "migration-stats.h"
@@ -141,7 +142,7 @@  void cpu_throttle_dirty_sync_timer_tick(void *opaque)
      * effect on guest performance, therefore omit it to avoid
      * paying extra for the sync penalty.
      */
-    if (sync_cnt <= 1) {
+    if (sync_cnt <= (kvm_dirty_log_manual_enabled() ? 0 : 1)) {
         goto end;
     }
 
diff --git a/migration/ram.c b/migration/ram.c
index d284f63854..b312ebd69d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2718,7 +2718,7 @@  static void ram_list_init_bitmaps(void)
 {
     MigrationState *ms = migrate_get_current();
     RAMBlock *block;
-    unsigned long pages;
+    unsigned long pages, clear_bmap_pages;
     uint8_t shift;
 
     /* Skip setting bitmap if there is no RAM */
@@ -2736,6 +2736,7 @@  static void ram_list_init_bitmaps(void)
 
         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             pages = block->max_length >> TARGET_PAGE_BITS;
+            clear_bmap_pages = clear_bmap_size(pages, shift);
             /*
              * The initial dirty bitmap for migration must be set with all
              * ones to make sure we'll migrate every guest RAM page to
@@ -2751,7 +2752,17 @@  static void ram_list_init_bitmaps(void)
                 block->file_bmap = bitmap_new(pages);
             }
             block->clear_bmap_shift = shift;
-            block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
+            block->clear_bmap = bitmap_new(clear_bmap_pages);
+            /*
+             * Set the clear bitmap by default to enable dirty logging.
+             *
+             * Note that with KVM_DIRTY_LOG_INITIALLY_SET, dirty logging
+             * will be enabled gradually in small chunks using
+             * KVM_CLEAR_DIRTY_LOG
+             */
+            if (kvm_dirty_log_manual_enabled()) {
+                bitmap_set(block->clear_bmap, 0, clear_bmap_pages);
+            }
         }
     }
 }
@@ -2771,6 +2782,7 @@  static void migration_bitmap_clear_discarded_pages(RAMState *rs)
 
 static bool ram_init_bitmaps(RAMState *rs, Error **errp)
 {
+    Error *local_err = NULL;
     bool ret = true;
 
     qemu_mutex_lock_ramlist();
@@ -2783,7 +2795,19 @@  static bool ram_init_bitmaps(RAMState *rs, Error **errp)
             if (!ret) {
                 goto out_unlock;
             }
-            migration_bitmap_sync_precopy(false);
+
+            if (kvm_dirty_log_manual_enabled()) {
+                /*
+                 * Bypass the RAMBlock dirty sync and still publish a
+                 * notification.
+                 */
+                if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC,
+                            &local_err)) {
+                    error_report_err(local_err);
+                }
+            } else {
+                migration_bitmap_sync_precopy(false);
+            }
         }
     }
 out_unlock: