
[v4,5/6] migration: Support periodic RAMBlock dirty bitmap sync

Message ID: f61f1b3653f2acf026901103e1c73d157d38b08f.1729146786.git.yong.huang@smartx.com (mailing list archive)
State: New
Series: migration: auto-converge refinements for huge VM

Commit Message

Yong Huang Oct. 17, 2024, 6:42 a.m. UTC
From: Hyman Huang <yong.huang@smartx.com>

When a VM is configured with huge memory, the current throttle logic
does not seem to scale, because migration_trigger_throttle() is only
called once per iteration, so it won't be invoked for a long time if a
single iteration takes a long time.

The periodic dirty sync aims to fix the above issue by synchronizing
the RAMBlock dirty bitmap from the remote dirty bitmap and, when
necessary, triggering the CPU throttle multiple times during a long
iteration.

This is a trade-off between synchronization overhead and CPU throttle
impact.
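
To make the rule concrete, here is a minimal standalone sketch in plain
C (illustrative only, not the patch code; every name below is made up
for the example): on each timer tick, if the dirty sync counter has not
advanced since the previous tick, the current iteration is still
running, so an extra bitmap sync is forced.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t prev_sync_cnt;

    /* Models one tick of the 5000 ms periodic timer added by this patch. */
    static void periodic_tick(uint64_t sync_cnt)
    {
        /* sync_cnt <= 1 is the first full-copy iteration; skip the extra sync. */
        if (sync_cnt > 1 && sync_cnt == prev_sync_cnt) {
            printf("iteration still running -> force a dirty bitmap sync\n");
        }
        prev_sync_cnt = sync_cnt;
    }

    int main(void)
    {
        /* How dirty_sync_count might look at five consecutive ticks. */
        uint64_t samples[] = { 1, 2, 2, 2, 3 };
        for (int i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++) {
            periodic_tick(samples[i]);
        }
        return 0;
    }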

Signed-off-by: Hyman Huang <yong.huang@smartx.com>
---
 migration/cpu-throttle.c | 65 +++++++++++++++++++++++++++++++++++++++-
 migration/cpu-throttle.h | 14 +++++++++
 migration/migration.c    | 14 +++++++--
 migration/migration.h    |  1 +
 migration/ram.c          |  2 +-
 migration/trace-events   |  1 +
 6 files changed, 93 insertions(+), 4 deletions(-)

Comments

Fabiano Rosas Oct. 17, 2024, 6:57 p.m. UTC | #1
yong.huang@smartx.com writes:

> From: Hyman Huang <yong.huang@smartx.com>
>
> When a VM is configured with huge memory, the current throttle logic
> does not seem to scale, because migration_trigger_throttle() is only
> called once per iteration, so it won't be invoked for a long time if a
> single iteration takes a long time.
>
> The periodic dirty sync aims to fix the above issue by synchronizing
> the RAMBlock dirty bitmap from the remote dirty bitmap and, when
> necessary, triggering the CPU throttle multiple times during a long
> iteration.
>
> This is a trade-off between synchronization overhead and CPU throttle
> impact.
>
> Signed-off-by: Hyman Huang <yong.huang@smartx.com>

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Peter Xu Oct. 17, 2024, 7:33 p.m. UTC | #2
On Thu, Oct 17, 2024 at 02:42:54PM +0800, yong.huang@smartx.com wrote:
> +void cpu_throttle_dirty_sync_timer_tick(void *opaque)
> +{
> +    static uint64_t prev_sync_cnt;

We may need to reset this in case migration got cancelled and invoked
again, to make sure it keeps working in the 2nd run.
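
To illustrate the concern with a standalone sketch (plain C, not QEMU
code; the names are made up): a function-local static is initialised
once per process rather than once per migration, so unless something
resets the cached value when the timer is enabled again, a second run
starts from whatever the first run left behind.

    #include <stdint.h>
    #include <stdio.h>

    /* File-scope cache, reset explicitly whenever a new run enables the timer. */
    static uint64_t count_prev;

    static void timer_enable(void)
    {
        count_prev = 0;   /* without this reset, run 2 inherits run 1's value */
    }

    static void timer_tick(uint64_t sync_cnt)
    {
        if (sync_cnt > 1 && sync_cnt == count_prev) {
            printf("forcing an extra dirty bitmap sync\n");
        }
        count_prev = sync_cnt;
    }

    int main(void)
    {
        timer_enable();    /* run 1 */
        timer_tick(2);
        timer_tick(2);     /* counter stalled: extra sync forced */

        timer_enable();    /* run 2, after the first migration was cancelled */
        timer_tick(2);     /* fresh start: no stale comparison */
        return 0;
    }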

> +    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> +
> +    /*
> +     * The first iteration copies all memory anyhow and has no
> +     * effect on guest performance, therefore omit it to avoid
> +     * paying extra for the sync penalty.
> +     */
> +    if (sync_cnt <= 1) {
> +        goto end;
> +    }
> +
> +    if (sync_cnt == prev_sync_cnt) {
> +        trace_cpu_throttle_dirty_sync();
> +        WITH_RCU_READ_LOCK_GUARD() {
> +            migration_bitmap_sync_precopy(false);
> +        }
> +    }
> +
> +end:
> +    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> +
> +    timer_mod(throttle_dirty_sync_timer,
> +        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> +            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> +}

Please both of you have a look at whether you agree that I squash the
below into this patch when merging:

===8<===
From 84a2544eab73e35dbd35fed3b1440169915f9aa4 Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Thu, 17 Oct 2024 15:27:19 -0400
Subject: [PATCH] fixup! migration: Support periodic RAMBlock dirty bitmap sync

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/cpu-throttle.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
index 342681cdd4..3df287d8d3 100644
--- a/migration/cpu-throttle.c
+++ b/migration/cpu-throttle.c
@@ -36,6 +36,7 @@
 static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
 static unsigned int throttle_percentage;
 static bool throttle_dirty_sync_timer_active;
+static uint64_t throttle_dirty_sync_count_prev;
 
 #define CPU_THROTTLE_PCT_MIN 1
 #define CPU_THROTTLE_PCT_MAX 99
@@ -133,7 +134,6 @@ int cpu_throttle_get_percentage(void)
 
 void cpu_throttle_dirty_sync_timer_tick(void *opaque)
 {
-    static uint64_t prev_sync_cnt;
     uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
 
     /*
@@ -145,7 +145,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
         goto end;
     }
 
-    if (sync_cnt == prev_sync_cnt) {
+    if (sync_cnt == throttle_dirty_sync_count_prev) {
         trace_cpu_throttle_dirty_sync();
         WITH_RCU_READ_LOCK_GUARD() {
             migration_bitmap_sync_precopy(false);
@@ -153,7 +153,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
     }
 
 end:
-    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+    throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);
 
     timer_mod(throttle_dirty_sync_timer,
         qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
@@ -171,6 +171,11 @@ void cpu_throttle_dirty_sync_timer(bool enable)
 
     if (enable) {
         if (!cpu_throttle_dirty_sync_active()) {
+            /*
+             * Always reset the dirty sync count cache, in case migration
+             * was cancelled once.
+             */
+            throttle_dirty_sync_count_prev = 0;
             timer_mod(throttle_dirty_sync_timer,
                 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
                     CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
Fabiano Rosas Oct. 17, 2024, 8:35 p.m. UTC | #3
Peter Xu <peterx@redhat.com> writes:

> On Thu, Oct 17, 2024 at 02:42:54PM +0800, yong.huang@smartx.com wrote:
>> +void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>> +{
>> +    static uint64_t prev_sync_cnt;
>
> We may need to reset this in case migration got cancelled and invoked
> again, to make sure it keeps working in the 2nd run.
>
>> +    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
>> +
>> +    /*
>> +     * The first iteration copies all memory anyhow and has no
>> +     * effect on guest performance, therefore omit it to avoid
>> +     * paying extra for the sync penalty.
>> +     */
>> +    if (sync_cnt <= 1) {
>> +        goto end;
>> +    }
>> +
>> +    if (sync_cnt == prev_sync_cnt) {
>> +        trace_cpu_throttle_dirty_sync();
>> +        WITH_RCU_READ_LOCK_GUARD() {
>> +            migration_bitmap_sync_precopy(false);
>> +        }
>> +    }
>> +
>> +end:
>> +    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
>> +
>> +    timer_mod(throttle_dirty_sync_timer,
>> +        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
>> +            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
>> +}
>
> Please both of you have a look at whether you agree that I squash the
> below into this patch when merging:
>
> ===8<===
> From 84a2544eab73e35dbd35fed3b1440169915f9aa4 Mon Sep 17 00:00:00 2001
> From: Peter Xu <peterx@redhat.com>
> Date: Thu, 17 Oct 2024 15:27:19 -0400
> Subject: [PATCH] fixup! migration: Support periodic RAMBlock dirty bitmap sync
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  migration/cpu-throttle.c | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
> index 342681cdd4..3df287d8d3 100644
> --- a/migration/cpu-throttle.c
> +++ b/migration/cpu-throttle.c
> @@ -36,6 +36,7 @@
>  static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
>  static unsigned int throttle_percentage;
>  static bool throttle_dirty_sync_timer_active;
> +static uint64_t throttle_dirty_sync_count_prev;
>  
>  #define CPU_THROTTLE_PCT_MIN 1
>  #define CPU_THROTTLE_PCT_MAX 99
> @@ -133,7 +134,6 @@ int cpu_throttle_get_percentage(void)
>  
>  void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>  {
> -    static uint64_t prev_sync_cnt;
>      uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
>  
>      /*
> @@ -145,7 +145,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>          goto end;
>      }
>  
> -    if (sync_cnt == prev_sync_cnt) {
> +    if (sync_cnt == throttle_dirty_sync_count_prev) {
>          trace_cpu_throttle_dirty_sync();
>          WITH_RCU_READ_LOCK_GUARD() {
>              migration_bitmap_sync_precopy(false);
> @@ -153,7 +153,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>      }
>  
>  end:
> -    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> +    throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);
>  
>      timer_mod(throttle_dirty_sync_timer,
>          qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> @@ -171,6 +171,11 @@ void cpu_throttle_dirty_sync_timer(bool enable)
>  
>      if (enable) {
>          if (!cpu_throttle_dirty_sync_active()) {
> +            /*
> +             * Always reset the dirty sync count cache, in case migration
> +             * was cancelled once.
> +             */
> +            throttle_dirty_sync_count_prev = 0;
>              timer_mod(throttle_dirty_sync_timer,
>                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
>                      CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> -- 
> 2.45.0

LGTM
Yong Huang Oct. 18, 2024, 1:55 a.m. UTC | #4
On Fri, Oct 18, 2024 at 3:33 AM Peter Xu <peterx@redhat.com> wrote:

> On Thu, Oct 17, 2024 at 02:42:54PM +0800, yong.huang@smartx.com wrote:
> > +void cpu_throttle_dirty_sync_timer_tick(void *opaque)
> > +{
> > +    static uint64_t prev_sync_cnt;
>
> We may need to reset this in case migration got cancelled and invoked
> again, to make sure it keeps working in the 2nd run.
>
> > +    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> > +
> > +    /*
> > +     * The first iteration copies all memory anyhow and has no
> > +     * effect on guest performance, therefore omit it to avoid
> > +     * paying extra for the sync penalty.
> > +     */
> > +    if (sync_cnt <= 1) {
> > +        goto end;
> > +    }
> > +
> > +    if (sync_cnt == prev_sync_cnt) {
> > +        trace_cpu_throttle_dirty_sync();
> > +        WITH_RCU_READ_LOCK_GUARD() {
> > +            migration_bitmap_sync_precopy(false);
> > +        }
> > +    }
> > +
> > +end:
> > +    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> > +
> > +    timer_mod(throttle_dirty_sync_timer,
> > +        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> > +            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> > +}
>
> Please both of you have a look at whether you agree that I squash the
> below into this patch when merging:
>

Thanks for the fixup, it looks good to me.


>
> ===8<===
> From 84a2544eab73e35dbd35fed3b1440169915f9aa4 Mon Sep 17 00:00:00 2001
> From: Peter Xu <peterx@redhat.com>
> Date: Thu, 17 Oct 2024 15:27:19 -0400
> Subject: [PATCH] fixup! migration: Support periodic RAMBlock dirty bitmap sync
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  migration/cpu-throttle.c | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
> index 342681cdd4..3df287d8d3 100644
> --- a/migration/cpu-throttle.c
> +++ b/migration/cpu-throttle.c
> @@ -36,6 +36,7 @@
>  static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
>  static unsigned int throttle_percentage;
>  static bool throttle_dirty_sync_timer_active;
> +static uint64_t throttle_dirty_sync_count_prev;
>
>  #define CPU_THROTTLE_PCT_MIN 1
>  #define CPU_THROTTLE_PCT_MAX 99
> @@ -133,7 +134,6 @@ int cpu_throttle_get_percentage(void)
>
>  void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>  {
> -    static uint64_t prev_sync_cnt;
>      uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
>
>      /*
> @@ -145,7 +145,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>          goto end;
>      }
>
> -    if (sync_cnt == prev_sync_cnt) {
> +    if (sync_cnt == throttle_dirty_sync_count_prev) {
>          trace_cpu_throttle_dirty_sync();
>          WITH_RCU_READ_LOCK_GUARD() {
>              migration_bitmap_sync_precopy(false);
> @@ -153,7 +153,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
>      }
>
>  end:
> -    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> +    throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);
>
>      timer_mod(throttle_dirty_sync_timer,
>          qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> @@ -171,6 +171,11 @@ void cpu_throttle_dirty_sync_timer(bool enable)
>
>      if (enable) {
>          if (!cpu_throttle_dirty_sync_active()) {
> +            /*
> +             * Always reset the dirty sync count cache, in case migration
> +             * was cancelled once.
> +             */
> +            throttle_dirty_sync_count_prev = 0;
>              timer_mod(throttle_dirty_sync_timer,
>                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
>                      CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> --
> 2.45.0
>
> --
> Peter Xu
>
>
Acked-by: Hyman Huang <yong.huang@smartx.com>

Patch

diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
index fa47ee2e21..342681cdd4 100644
--- a/migration/cpu-throttle.c
+++ b/migration/cpu-throttle.c
@@ -28,16 +28,22 @@ 
 #include "qemu/main-loop.h"
 #include "sysemu/cpus.h"
 #include "cpu-throttle.h"
+#include "migration.h"
+#include "migration-stats.h"
 #include "trace.h"
 
 /* vcpu throttling controls */
-static QEMUTimer *throttle_timer;
+static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
 static unsigned int throttle_percentage;
+static bool throttle_dirty_sync_timer_active;
 
 #define CPU_THROTTLE_PCT_MIN 1
 #define CPU_THROTTLE_PCT_MAX 99
 #define CPU_THROTTLE_TIMESLICE_NS 10000000
 
+/* Making sure RAMBlock dirty bitmap is synchronized every five seconds */
+#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000
+
 static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
 {
     double pct;
@@ -112,6 +118,7 @@  void cpu_throttle_set(int new_throttle_pct)
 void cpu_throttle_stop(void)
 {
     qatomic_set(&throttle_percentage, 0);
+    cpu_throttle_dirty_sync_timer(false);
 }
 
 bool cpu_throttle_active(void)
@@ -124,8 +131,64 @@  int cpu_throttle_get_percentage(void)
     return qatomic_read(&throttle_percentage);
 }
 
+void cpu_throttle_dirty_sync_timer_tick(void *opaque)
+{
+    static uint64_t prev_sync_cnt;
+    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+
+    /*
+     * The first iteration copies all memory anyhow and has no
+     * effect on guest performance, therefore omit it to avoid
+     * paying extra for the sync penalty.
+     */
+    if (sync_cnt <= 1) {
+        goto end;
+    }
+
+    if (sync_cnt == prev_sync_cnt) {
+        trace_cpu_throttle_dirty_sync();
+        WITH_RCU_READ_LOCK_GUARD() {
+            migration_bitmap_sync_precopy(false);
+        }
+    }
+
+end:
+    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+
+    timer_mod(throttle_dirty_sync_timer,
+        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+}
+
+static bool cpu_throttle_dirty_sync_active(void)
+{
+    return qatomic_read(&throttle_dirty_sync_timer_active);
+}
+
+void cpu_throttle_dirty_sync_timer(bool enable)
+{
+    assert(throttle_dirty_sync_timer);
+
+    if (enable) {
+        if (!cpu_throttle_dirty_sync_active()) {
+            timer_mod(throttle_dirty_sync_timer,
+                qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+                    CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+            qatomic_set(&throttle_dirty_sync_timer_active, 1);
+        }
+    } else {
+        if (cpu_throttle_dirty_sync_active()) {
+            timer_del(throttle_dirty_sync_timer);
+            qatomic_set(&throttle_dirty_sync_timer_active, 0);
+        }
+    }
+}
+
 void cpu_throttle_init(void)
 {
     throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                   cpu_throttle_timer_tick, NULL);
+    throttle_dirty_sync_timer =
+        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+                     cpu_throttle_dirty_sync_timer_tick, NULL);
 }
diff --git a/migration/cpu-throttle.h b/migration/cpu-throttle.h
index d65bdef6d0..420702b8d3 100644
--- a/migration/cpu-throttle.h
+++ b/migration/cpu-throttle.h
@@ -65,4 +65,18 @@  bool cpu_throttle_active(void);
  */
 int cpu_throttle_get_percentage(void);
 
+/**
+ * cpu_throttle_dirty_sync_timer_tick:
+ *
+ * Dirty sync timer hook.
+ */
+void cpu_throttle_dirty_sync_timer_tick(void *opaque);
+
+/**
+ * cpu_throttle_dirty_sync_timer:
+ *
+ * Start or stop the dirty sync timer.
+ */
+void cpu_throttle_dirty_sync_timer(bool enable);
+
 #endif /* SYSEMU_CPU_THROTTLE_H */
diff --git a/migration/migration.c b/migration/migration.c
index 2e10ca77af..f673e30069 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3291,12 +3291,17 @@  static MigIterateState migration_iteration_run(MigrationState *s)
 
 static void migration_iteration_finish(MigrationState *s)
 {
-    /* If we enabled cpu throttling for auto-converge, turn it off. */
+    bql_lock();
+
+    /*
+     * If we enabled cpu throttling for auto-converge, turn it off.
+     * Stopping CPU throttle should be serialized by BQL to avoid
+     * racing for the throttle_dirty_sync_timer.
+     */
     if (migrate_auto_converge()) {
         cpu_throttle_stop();
     }
 
-    bql_lock();
     switch (s->state) {
     case MIGRATION_STATUS_COMPLETED:
         runstate_set(RUN_STATE_POSTMIGRATE);
@@ -3513,6 +3518,11 @@  static void *migration_thread(void *opaque)
         qemu_savevm_send_colo_enable(s->to_dst_file);
     }
 
+    if (migrate_auto_converge()) {
+        /* Start RAMBlock dirty bitmap sync timer */
+        cpu_throttle_dirty_sync_timer(true);
+    }
+
     bql_lock();
     ret = qemu_savevm_state_setup(s->to_dst_file, &local_err);
     bql_unlock();
diff --git a/migration/migration.h b/migration/migration.h
index 38aa1402d5..fbd0d19092 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -537,4 +537,5 @@  int migration_rp_wait(MigrationState *s);
  */
 void migration_rp_kick(MigrationState *s);
 
+void migration_bitmap_sync_precopy(bool last_stage);
 #endif
diff --git a/migration/ram.c b/migration/ram.c
index 9b5b350405..d284f63854 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1088,7 +1088,7 @@  static void migration_bitmap_sync(RAMState *rs, bool last_stage)
     }
 }
 
-static void migration_bitmap_sync_precopy(bool last_stage)
+void migration_bitmap_sync_precopy(bool last_stage)
 {
     Error *local_err = NULL;
     assert(ram_state);
diff --git a/migration/trace-events b/migration/trace-events
index 9a19599804..0638183056 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -381,3 +381,4 @@  migration_pagecache_insert(void) "Error allocating page"
 
 # cpu-throttle.c
 cpu_throttle_set(int new_throttle_pct)  "set guest CPU throttled by %d%%"
+cpu_throttle_dirty_sync(void) ""