[RESEND,RFC,05/10] migration: Introduce util functions for periodic CPU throttle

Message ID 26d4e848d67b14ebeb9a3e0667688056a65df4b4.1725891841.git.yong.huang@smartx.com (mailing list archive)
State New
Series migration: auto-converge refinements for huge VM

Commit Message

Yong Huang Sept. 9, 2024, 2:25 p.m. UTC
Provide utility functions to manage the lifecycle of the
periodic_throttle_thread. Additionally, provide
periodic_throttle_setup to select the bitmap sync mode.

Signed-off-by: Hyman Huang <yong.huang@smartx.com>
---
 migration/ram.c        | 98 +++++++++++++++++++++++++++++++++++++++++-
 migration/ram.h        |  4 ++
 migration/trace-events |  3 ++
 3 files changed, 104 insertions(+), 1 deletion(-)
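
The helpers are not called yet in this patch; the call sites arrive later
in the series. As a rough sketch of the intended lifecycle (the wrapper
function below and its name are hypothetical, for illustration only):

    /* Hypothetical caller; the actual call sites come in later patches. */
    static void ram_mig_periodic_throttle(bool enable)
    {
        /* Select the bitmap sync mode before migration begins. */
        periodic_throttle_setup(enable);

        if (enable) {
            /* Spawn periodic_throttle_thread; no-op if already running. */
            periodic_throttle_start();
        } else {
            /* Join the thread; no-op if it was never started. */
            periodic_throttle_stop();
        }
    }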

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 23471c9e5a..d9d8ed0fda 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -416,6 +416,10 @@  struct RAMState {
      * RAM migration.
      */
     unsigned int postcopy_bmap_sync_requested;
+
+    /* Periodic throttle information */
+    bool throttle_running;
+    QemuThread throttle_thread;
 };
 typedef struct RAMState RAMState;
 
@@ -1075,7 +1079,13 @@  static void migration_bitmap_sync(RAMState *rs,
     RAMBlock *block;
     int64_t end_time;
 
-    if (!periodic) {
+    if (periodic) {
+        /* Be careful that we don't synchronize too often */
+        int64_t curr_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+        if (curr_time < rs->time_last_bitmap_sync + 1000) {
+            return;
+        }
+    } else {
         stat64_add(&mig_stats.iteration_count, 1);
     }
 
@@ -1121,6 +1131,92 @@  static void migration_bitmap_sync(RAMState *rs,
     }
 }
 
+static void *periodic_throttle_thread(void *opaque)
+{
+    RAMState *rs = opaque;
+    bool skip_sleep = false;
+    int sleep_duration = migrate_periodic_throttle_interval();
+
+    rcu_register_thread();
+
+    while (qatomic_read(&rs->throttle_running)) {
+        int64_t curr_time;
+        if (!skip_sleep) {
+            sleep(sleep_duration);
+        }
+
+        /*
+         * The first iteration copies all memory anyway, so throttling
+         * has no effect on guest performance during it; skip the sync
+         * to avoid paying the extra penalty.
+         */
+        if (stat64_get(&mig_stats.iteration_count) <= 1) {
+            continue;
+        }
+
+        /* Be careful that we don't synchronize too often */
+        curr_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+        if (curr_time > rs->time_last_bitmap_sync + 1000) {
+            bql_lock();
+            trace_migration_periodic_throttle();
+            WITH_RCU_READ_LOCK_GUARD() {
+                migration_bitmap_sync(rs, false, true);
+            }
+            bql_unlock();
+            skip_sleep = false;
+        } else {
+            skip_sleep = true;
+        }
+    }
+
+    rcu_unregister_thread();
+
+    return NULL;
+}
+
+void periodic_throttle_start(void)
+{
+    RAMState *rs = ram_state;
+
+    if (!rs) {
+        return;
+    }
+
+    if (qatomic_read(&rs->throttle_running)) {
+        return;
+    }
+
+    trace_migration_periodic_throttle_start();
+
+    qatomic_set(&rs->throttle_running, true);
+    qemu_thread_create(&rs->throttle_thread,
+                       NULL, periodic_throttle_thread,
+                       rs, QEMU_THREAD_JOINABLE);
+}
+
+void periodic_throttle_stop(void)
+{
+    RAMState *rs = ram_state;
+
+    if (!rs) {
+        return;
+    }
+
+    if (!qatomic_read(&rs->throttle_running)) {
+        return;
+    }
+
+    trace_migration_periodic_throttle_stop();
+
+    qatomic_set(&rs->throttle_running, false);
+    qemu_thread_join(&rs->throttle_thread);
+}
+
+void periodic_throttle_setup(bool enable)
+{
+    sync_mode = enable ? RAMBLOCK_SYN_MODERN : RAMBLOCK_SYN_LEGACY;
+}
+
 static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
 {
     Error *local_err = NULL;
diff --git a/migration/ram.h b/migration/ram.h
index bc0318b834..f7c7b2e7ad 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -93,4 +93,8 @@  void ram_write_tracking_prepare(void);
 int ram_write_tracking_start(void);
 void ram_write_tracking_stop(void);
 
+/* Periodic throttle */
+void periodic_throttle_start(void);
+void periodic_throttle_stop(void);
+void periodic_throttle_setup(bool enable);
 #endif
diff --git a/migration/trace-events b/migration/trace-events
index c65902f042..5b9db57c8f 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -95,6 +95,9 @@  get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned
 migration_bitmap_sync_start(void) ""
 migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
 migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx"
+migration_periodic_throttle(void) ""
+migration_periodic_throttle_start(void) ""
+migration_periodic_throttle_stop(void) ""
 migration_throttle(void) ""
 migration_dirty_limit_guest(int64_t dirtyrate) "guest dirty page rate limit %" PRIi64 " MB/s"
 ram_discard_range(const char *rbname, uint64_t start, size_t len) "%s: start: %" PRIx64 " %zx"
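
Note that the one-second guard appears twice: periodic_throttle_thread does
a cheap pre-check of time_last_bitmap_sync without holding the BQL, and
migration_bitmap_sync() re-checks after the lock is taken, so a sync that
raced in between is not repeated. A minimal sketch of the pattern, with
simplified illustrative names (now_ms, last_sync_ms, and do_bitmap_sync are
not the real identifiers):

    /* Lock-free pre-check in the throttle thread. */
    if (now_ms() > last_sync_ms + 1000) {
        bql_lock();
        /* Authoritative re-check under the BQL, as in migration_bitmap_sync(). */
        if (now_ms() >= last_sync_ms + 1000) {
            do_bitmap_sync(); /* also updates last_sync_ms */
        }
        bql_unlock();
    }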