
[v4,07/10] migration: do not flush_compressed_data at the end of each iteration

Message ID 20180821081029.26121-8-xiaoguangrong@tencent.com (mailing list archive)
State: New, archived
Series: migration: compression optimization

Commit Message

Xiao Guangrong Aug. 21, 2018, 8:10 a.m. UTC
From: Xiao Guangrong <xiaoguangrong@tencent.com>

flush_compressed_data() needs to wait for all compression threads to
finish their work; after that, all the threads are idle until the
migration feeds new requests to them. Reducing the number of calls to
it can improve throughput and use CPU resources more effectively.

We do not need to flush all the threads at the end of each iteration:
the data can be kept locally until the memory block changes or memory
migration starts over, in which case we will meet a dirtied page whose
stale copy may still exist in a compression thread's ring.

Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
---
 migration/ram.c | 90 +++++++++++++++++++++++++++++++--------------------------
 1 file changed, 49 insertions(+), 41 deletions(-)
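
To make the intended flush policy concrete, here is a minimal,
self-contained C sketch (a toy model, not QEMU code: ring_flush(),
the counts, and the loop structure are invented for illustration).
Compressed data is drained only when the RAMBlock changes or a new
round starts, instead of once per iteration:

    #include <stdio.h>

    enum { ROUNDS = 4, BLOCKS = 3, PAGES = 5 };

    static int flushes;

    /* stand-in for flush_compressed_data(): wait for every
     * compression thread to finish and drain its buffered output */
    static void ring_flush(void)
    {
        flushes++;
    }

    int main(void)
    {
        for (int round = 0; round < ROUNDS; round++) {
            /* a new round may re-send pages whose stale compressed
             * copies are still buffered, so drain before re-scanning */
            ring_flush();

            for (int block = 0, last = -1; block < BLOCKS; block++) {
                for (int page = 0; page < PAGES; page++) {
                    if (block != last) {
                        /* switching RAMBlocks: drain so the previous
                         * block's pages hit the stream first */
                        ring_flush();
                        last = block;
                    }
                    /* queue (block, page) to a compression thread */
                }
            }
            /* note: no flush at the end of the iteration any more */
        }

        printf("flushes: %d (the old policy would add %d more, "
               "one per iteration)\n", flushes, ROUNDS);
        return 0;
    }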

Comments

Peter Xu Aug. 22, 2018, 4:56 a.m. UTC | #1
On Tue, Aug 21, 2018 at 04:10:26PM +0800, guangrong.xiao@gmail.com wrote:
> From: Xiao Guangrong <xiaoguangrong@tencent.com>
> 
> flush_compressed_data() needs to wait for all compression threads to
> finish their work; after that, all the threads are idle until the
> migration feeds new requests to them. Reducing the number of calls to
> it can improve throughput and use CPU resources more effectively.
> 
> We do not need to flush all the threads at the end of each iteration:
> the data can be kept locally until the memory block changes or memory
> migration starts over, in which case we will meet a dirtied page whose
> stale copy may still exist in a compression thread's ring.

You forgot to remove the line in ram_save_iterate(), didn't you? :)
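
Presumably that is the per-iteration flush call; the follow-up fix
would be a one-line removal along these lines (a sketch against the
3.0-era ram.c, not an actual posted hunk):

    @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
    -    flush_compressed_data(rs);
         rcu_read_unlock();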

> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
> ---
>  migration/ram.c | 90 +++++++++++++++++++++++++++++++--------------------------
>  1 file changed, 49 insertions(+), 41 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index 99ecf9b315..1d54285501 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1602,6 +1602,47 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
>      }
>  }
>  
> +static void
> +update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
> +{
> +    if (param->zero_page) {
> +        ram_counters.duplicate++;
> +    }
> +    ram_counters.transferred += bytes_xmit;
> +}
> +
> +static void flush_compressed_data(RAMState *rs)

If there is no content change in these two functions, I would rather
just declare flush_compressed_data() at the beginning of the file,
which is a one-liner.  What do you think?
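
That is, keep both function bodies where they are and only add a
forward declaration near the top of migration/ram.c, something like
(exact placement is up to the author):

    /* defined after the compression helpers below */
    static void flush_compressed_data(RAMState *rs);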

Regards,

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 99ecf9b315..1d54285501 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1602,6 +1602,47 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
     }
 }
 
+static void
+update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
+{
+    if (param->zero_page) {
+        ram_counters.duplicate++;
+    }
+    ram_counters.transferred += bytes_xmit;
+}
+
+static void flush_compressed_data(RAMState *rs)
+{
+    int idx, len, thread_count;
+
+    if (!migrate_use_compression()) {
+        return;
+    }
+    thread_count = migrate_compress_threads();
+
+    qemu_mutex_lock(&comp_done_lock);
+    for (idx = 0; idx < thread_count; idx++) {
+        while (!comp_param[idx].done) {
+            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
+        }
+    }
+    qemu_mutex_unlock(&comp_done_lock);
+
+    for (idx = 0; idx < thread_count; idx++) {
+        qemu_mutex_lock(&comp_param[idx].mutex);
+        if (!comp_param[idx].quit) {
+            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
+            /*
+             * It's safe to fetch zero_page without holding comp_done_lock,
+             * as there is no further request submitted to the thread,
+             * i.e., the thread should be waiting for a request at this point.
+             */
+            update_compress_thread_counts(&comp_param[idx], len);
+        }
+        qemu_mutex_unlock(&comp_param[idx].mutex);
+    }
+}
+
 static void migration_bitmap_sync(RAMState *rs)
 {
     RAMBlock *block;
@@ -1610,6 +1651,14 @@ static void migration_bitmap_sync(RAMState *rs)
 
     ram_counters.dirty_sync_count++;
 
+    /*
+     * If memory migration starts over, we will meet a dirtied page
+     * whose stale copy may still exist in a compression thread's ring,
+     * so we should flush the compressed data to make sure the new page
+     * is not overwritten by the old one on the destination.
+     */
+    flush_compressed_data(rs);
+
     if (!rs->time_last_bitmap_sync) {
         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     }
@@ -1878,47 +1927,6 @@ exit:
     return zero_page;
 }
 
-static void
-update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
-{
-    if (param->zero_page) {
-        ram_counters.duplicate++;
-    }
-    ram_counters.transferred += bytes_xmit;
-}
-
-static void flush_compressed_data(RAMState *rs)
-{
-    int idx, len, thread_count;
-
-    if (!migrate_use_compression()) {
-        return;
-    }
-    thread_count = migrate_compress_threads();
-
-    qemu_mutex_lock(&comp_done_lock);
-    for (idx = 0; idx < thread_count; idx++) {
-        while (!comp_param[idx].done) {
-            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
-        }
-    }
-    qemu_mutex_unlock(&comp_done_lock);
-
-    for (idx = 0; idx < thread_count; idx++) {
-        qemu_mutex_lock(&comp_param[idx].mutex);
-        if (!comp_param[idx].quit) {
-            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
-            /*
-             * it's safe to fetch zero_page without holding comp_done_lock
-             * as there is no further request submitted to the thread,
-             * i.e, the thread should be waiting for a request at this point.
-             */
-            update_compress_thread_counts(&comp_param[idx], len);
-        }
-        qemu_mutex_unlock(&comp_param[idx].mutex);
-    }
-}
-
 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                        ram_addr_t offset)
 {