
[v1,5/7] migration: Support background dirty bitmap sync and throttle

Message ID d74bc4ffb073c886bc566e7d771910db844cec1b.1726390099.git.yong.huang@smartx.com
State New
Series migration: auto-converge refinements for huge VM

Commit Message

Yong Huang Sept. 15, 2024, 4:08 p.m. UTC
When a VM is configured with a huge amount of memory, the current
throttle logic does not scale: migration_trigger_throttle() is only
called once per iteration, so it is not invoked for a long time if a
single iteration takes a long time. On a guest with terabytes of RAM,
for example, one pass over the dirty bitmap can take minutes, during
which the throttle percentage is never re-evaluated.

The background sync and throttle fix this by synchronizing the remote
dirty bitmap and triggering the throttle once they detect that an
iteration has been running for too long.

This is a trade-off between synchronization overhead and CPU throttle
impact.

Signed-off-by: Hyman Huang <yong.huang@smartx.com>
---
 migration/migration.c        | 12 +++++++++++
 tests/qtest/migration-test.c | 39 ++++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+)
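
For intuition, the watcher this series adds boils down to a periodic
check that redoes, in the middle of a long iteration, the work that
migration_trigger_throttle() otherwise only does at iteration
boundaries. The following is a minimal sketch of that idea using QEMU's
timer API; the names bg_sync_timer_cb and sync_and_check_throttle(),
and the five-second period, are illustrative and not taken from this
series:

#include "qemu/timer.h"

#define BG_SYNC_PERIOD_MS (5 * 1000)    /* illustrative period */

static QEMUTimer *bg_sync_timer;

/*
 * Hypothetical helper: sync the remote dirty bitmap so the dirty rate
 * is fresh, then re-run the throttle check that normally happens only
 * once per iteration.
 */
static void sync_and_check_throttle(void *opaque);

static void bg_sync_timer_cb(void *opaque)
{
    sync_and_check_throttle(opaque);

    /* Re-arm so the check keeps firing while the iteration drags on. */
    timer_mod(bg_sync_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + BG_SYNC_PERIOD_MS);
}

With something like this armed when the migration thread starts and
torn down on completion or failure, even a multi-minute iteration has
its throttle percentage re-evaluated every few seconds.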

Comments

Fabiano Rosas Sept. 16, 2024, 8:50 p.m. UTC | #1
Hyman Huang <yong.huang@smartx.com> writes:

> When a VM is configured with a huge amount of memory, the current
> throttle logic does not scale: migration_trigger_throttle() is only
> called once per iteration, so it is not invoked for a long time if a
> single iteration takes a long time. On a guest with terabytes of RAM,
> for example, one pass over the dirty bitmap can take minutes, during
> which the throttle percentage is never re-evaluated.
>
> The background sync and throttle fix this by synchronizing the remote
> dirty bitmap and triggering the throttle once they detect that an
> iteration has been running for too long.
>
> This is a trade-off between synchronization overhead and CPU throttle
> impact.
>
> Signed-off-by: Hyman Huang <yong.huang@smartx.com>
> ---
>  migration/migration.c        | 12 +++++++++++
>  tests/qtest/migration-test.c | 39 ++++++++++++++++++++++++++++++++++++
>  2 files changed, 51 insertions(+)
>
> diff --git a/migration/migration.c b/migration/migration.c
> index 055d527ff6..af8b22fa15 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1416,6 +1416,7 @@ static void migrate_fd_cleanup(MigrationState *s)
>  
>          trace_migrate_fd_cleanup();
>          bql_unlock();
> +        migration_background_sync_cleanup();
>          if (s->migration_thread_running) {
>              qemu_thread_join(&s->thread);
>              s->migration_thread_running = false;
> @@ -3263,6 +3264,7 @@ static MigIterateState migration_iteration_run(MigrationState *s)
>  
>      if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
>          trace_migration_thread_low_pending(pending_size);
> +        migration_background_sync_cleanup();

This one is redundant with the migrate_fd_cleanup() call at the end of
migration_iteration_finish().

>          migration_completion(s);
>          return MIG_ITERATE_BREAK;
>      }
> @@ -3508,6 +3510,16 @@ static void *migration_thread(void *opaque)
>      ret = qemu_savevm_state_setup(s->to_dst_file, &local_err);
>      bql_unlock();
>  
> +    if (!migrate_dirty_limit()) {
> +        /*
> +         * Start the background sync watcher so that CPU throttling
> +         * is triggered in time.  The dirty-limit capability slows
> +         * the guest down without CPU throttling, so skip the
> +         * watcher in that case.
> +         */
> +        migration_background_sync_setup();
> +    }
> +
>      qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
>                                 MIGRATION_STATUS_ACTIVE);
>  
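The migration_background_sync_setup()/cleanup() helpers called above
are introduced earlier in this series; in their simplest form such a
pair just arms and tears down a periodic timer like the sketch near the
top of this page (again purely illustrative; the real helpers may use a
dedicated thread instead):

void migration_background_sync_setup(void)
{
    bg_sync_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                 bg_sync_timer_cb, NULL);
    /* Arm the first expiry; the callback re-arms itself afterwards. */
    timer_mod(bg_sync_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + BG_SYNC_PERIOD_MS);
}

void migration_background_sync_cleanup(void)
{
    if (bg_sync_timer) {
        /* timer_free() also cancels a pending expiry in current QEMU. */
        timer_free(bg_sync_timer);
        bg_sync_timer = NULL;
    }
}
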
> diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
> index b796a90cad..e0e94d26be 100644
> --- a/tests/qtest/migration-test.c
> +++ b/tests/qtest/migration-test.c
> @@ -281,6 +281,11 @@ static uint64_t get_migration_pass(QTestState *who)
>      return read_ram_property_int(who, "iteration-count");
>  }
>  
> +static uint64_t get_dirty_sync_count(QTestState *who)
> +{
> +    return read_ram_property_int(who, "dirty-sync-count");
> +}
> +
>  static void read_blocktime(QTestState *who)
>  {
>      QDict *rsp_return;
> @@ -468,6 +473,12 @@ static void migrate_ensure_converge(QTestState *who)
>      migrate_set_parameter_int(who, "downtime-limit", 30 * 1000);
>  }
>  
> +static void migrate_ensure_iteration_last_long(QTestState *who)
> +{
> +    /* Set a 10 byte/s bandwidth limit so the iteration lasts long enough */
> +    migrate_set_parameter_int(who, "max-bandwidth", 10);
> +}
> +
>  /*
>   * Our goal is to ensure that we run a single full migration
>   * iteration, and also dirty memory, ensuring that at least
> @@ -2791,6 +2802,10 @@ static void test_migrate_auto_converge(void)
>       * so we need to decrease a bandwidth.
>       */
>      const int64_t init_pct = 5, inc_pct = 25, max_pct = 95;
> +    uint64_t prev_iter_cnt = 0, iter_cnt;
> +    uint64_t iter_cnt_changes = 0;
> +    uint64_t prev_dirty_sync_cnt = 0, dirty_sync_cnt;
> +    uint64_t dirty_sync_cnt_changes = 0;
>  
>      if (test_migrate_start(&from, &to, uri, &args)) {
>          return;
> @@ -2827,6 +2842,30 @@ static void test_migrate_auto_converge(void)
>      } while (true);
>      /* The first percentage of throttling should be at least init_pct */
>      g_assert_cmpint(percentage, >=, init_pct);
> +
> +    /* Make sure the iteration takes long enough */
> +    migrate_ensure_iteration_last_long(from);
> +
> +    /*
> +     * Exit the loop once the dirty sync count or iteration count has changed twice.
> +     */
> +    while (iter_cnt_changes < 2 && dirty_sync_cnt_changes < 2) {
> +        usleep(1000 * 1000);
> +        iter_cnt = get_migration_pass(from);
> +        iter_cnt_changes += (iter_cnt != prev_iter_cnt);
> +        prev_iter_cnt = iter_cnt;
> +
> +        dirty_sync_cnt = get_dirty_sync_count(from);
> +        dirty_sync_cnt_changes += (dirty_sync_cnt != prev_dirty_sync_cnt);
> +        prev_dirty_sync_cnt = dirty_sync_cnt;
> +    }
> +
> +    /*
> +     * The dirty sync count must have changed more often than the
> +     * iteration count, since background syncs fire within one long iteration.
> +     */
> +    g_assert_cmpint(iter_cnt_changes, <, dirty_sync_cnt_changes);
> +
>      /* Now, when we tested that throttling works, let it converge */
>      migrate_ensure_converge(from);

Patch

diff --git a/migration/migration.c b/migration/migration.c
index 055d527ff6..af8b22fa15 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1416,6 +1416,7 @@ static void migrate_fd_cleanup(MigrationState *s)
 
         trace_migrate_fd_cleanup();
         bql_unlock();
+        migration_background_sync_cleanup();
         if (s->migration_thread_running) {
             qemu_thread_join(&s->thread);
             s->migration_thread_running = false;
@@ -3263,6 +3264,7 @@ static MigIterateState migration_iteration_run(MigrationState *s)
 
     if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
         trace_migration_thread_low_pending(pending_size);
+        migration_background_sync_cleanup();
         migration_completion(s);
         return MIG_ITERATE_BREAK;
     }
@@ -3508,6 +3510,16 @@ static void *migration_thread(void *opaque)
     ret = qemu_savevm_state_setup(s->to_dst_file, &local_err);
     bql_unlock();
 
+    if (!migrate_dirty_limit()) {
+        /*
+         * Start the background sync watcher so that CPU throttling
+         * is triggered in time.  The dirty-limit capability slows
+         * the guest down without CPU throttling, so skip the
+         * watcher in that case.
+         */
+        migration_background_sync_setup();
+    }
+
     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                                MIGRATION_STATUS_ACTIVE);
 
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index b796a90cad..e0e94d26be 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -281,6 +281,11 @@ static uint64_t get_migration_pass(QTestState *who)
     return read_ram_property_int(who, "iteration-count");
 }
 
+static uint64_t get_dirty_sync_count(QTestState *who)
+{
+    return read_ram_property_int(who, "dirty-sync-count");
+}
+
 static void read_blocktime(QTestState *who)
 {
     QDict *rsp_return;
@@ -468,6 +473,12 @@ static void migrate_ensure_converge(QTestState *who)
     migrate_set_parameter_int(who, "downtime-limit", 30 * 1000);
 }
 
+static void migrate_ensure_iteration_last_long(QTestState *who)
+{
+    /* Set a 10 byte/s bandwidth limit so the iteration lasts long enough */
+    migrate_set_parameter_int(who, "max-bandwidth", 10);
+}
+
 /*
  * Our goal is to ensure that we run a single full migration
  * iteration, and also dirty memory, ensuring that at least
@@ -2791,6 +2802,10 @@ static void test_migrate_auto_converge(void)
      * so we need to decrease a bandwidth.
      */
     const int64_t init_pct = 5, inc_pct = 25, max_pct = 95;
+    uint64_t prev_iter_cnt = 0, iter_cnt;
+    uint64_t iter_cnt_changes = 0;
+    uint64_t prev_dirty_sync_cnt = 0, dirty_sync_cnt;
+    uint64_t dirty_sync_cnt_changes = 0;
 
     if (test_migrate_start(&from, &to, uri, &args)) {
         return;
@@ -2827,6 +2842,30 @@ static void test_migrate_auto_converge(void)
     } while (true);
     /* The first percentage of throttling should be at least init_pct */
     g_assert_cmpint(percentage, >=, init_pct);
+
+    /* Make sure the iteration takes long enough */
+    migrate_ensure_iteration_last_long(from);
+
+    /*
+     * Exit the loop once the dirty sync count or iteration count has changed twice.
+     */
+    while (iter_cnt_changes < 2 && dirty_sync_cnt_changes < 2) {
+        usleep(1000 * 1000);
+        iter_cnt = get_migration_pass(from);
+        iter_cnt_changes += (iter_cnt != prev_iter_cnt);
+        prev_iter_cnt = iter_cnt;
+
+        dirty_sync_cnt = get_dirty_sync_count(from);
+        dirty_sync_cnt_changes += (dirty_sync_cnt != prev_dirty_sync_cnt);
+        prev_dirty_sync_cnt = dirty_sync_cnt;
+    }
+
+    /*
+     * The dirty sync count must have changed more often than the
+     * iteration count, since background syncs fire within one long iteration.
+     */
+    g_assert_cmpint(iter_cnt_changes, <, dirty_sync_cnt_changes);
+
     /* Now, when we tested that throttling works, let it converge */
     migrate_ensure_converge(from);