
[1/1] migration: Non multifd migration don't care about multifd flushes

Message ID: 20231011205548.10571-2-quintela@redhat.com
State: New, archived
Series: migration: Fix non-multifd migration

Commit Message

Juan Quintela Oct. 11, 2023, 8:55 p.m. UTC
RDMA migration was failing because
migrate_multifd_flush_after_each_section() can only return true or
false, so one of the two multifd flush paths was always taken; but when
we are not in a multifd migration we must not send any flush at all.

CC: Fabiano Rosas <farosas@suse.de>
Fixes: 294e5a4034e81b3d8db03b4e0f691386f20d6ed3
       multifd: Only flush once each full round of memory
Reported-by: Li Zhijian <lizhijian@fujitsu.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
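
The flush decision really has three cases, but the pre-patch code
consulted only the boolean property. A minimal sketch to make that
explicit; the FlushPolicy enum and multifd_flush_policy() helper are
hypothetical and do not exist in QEMU (the patch instead open-codes the
migrate_multifd() check at each call site):

    /* Hypothetical helper making the three flush cases explicit. */
    typedef enum {
        FLUSH_NONE,          /* not multifd (e.g. RDMA): never flush */
        FLUSH_EACH_SECTION,  /* multifd, sync after every section    */
        FLUSH_EACH_ROUND,    /* multifd, flush once per memory round */
    } FlushPolicy;

    static FlushPolicy multifd_flush_policy(void)
    {
        if (!migrate_multifd()) {
            /* The case the old boolean check could not express. */
            return FLUSH_NONE;
        }
        return migrate_multifd_flush_after_each_section()
               ? FLUSH_EACH_SECTION : FLUSH_EACH_ROUND;
    }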

Comments

Li Zhijian Oct. 12, 2023, 1:14 a.m. UTC | #1
On 12/10/2023 04:55, Juan Quintela wrote:
> RDMA migration was failing because
> migrate_multifd_flush_after_each_section() can only return true or
> false, so one of the two multifd flush paths was always taken; but when
> we are not in a multifd migration we must not send any flush at all.
> 
> CC: Fabiano Rosas <farosas@suse.de>
> Fixes: 294e5a4034e81b3d8db03b4e0f691386f20d6ed3
>         multifd: Only flush once each full round of memory

Shouldn't the Fixes tag be like [1]?
Fixes: 294e5a4034e ("multifd: Only flush once each full round of memory")

[1] https://www.qemu.org/docs/master/devel/submitting-a-patch.html#id37


> Reported-by: Li Zhijian <lizhijian@fujitsu.com>
> Signed-off-by: Juan Quintela <quintela@redhat.com>

Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>

Peter Xu Oct. 12, 2023, 1:03 p.m. UTC | #2
On Wed, Oct 11, 2023 at 10:55:48PM +0200, Juan Quintela wrote:
> RDMA migration was failing because
> migrate_multifd_flush_after_each_section() can only return true or
> false, so one of the two multifd flush paths was always taken; but when
> we are not in a multifd migration we must not send any flush at all.
> 
> CC: Fabiano Rosas <farosas@suse.de>
> Fixes: 294e5a4034e81b3d8db03b4e0f691386f20d6ed3
>        multifd: Only flush once each full round of memory
> Reported-by: Li Zhijian <lizhijian@fujitsu.com>
> Signed-off-by: Juan Quintela <quintela@redhat.com>

Reviewed-by: Peter Xu <peterx@redhat.com>

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 2f5ce4d60b..d25bff2496 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1395,7 +1395,8 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
         pss->page = 0;
         pss->block = QLIST_NEXT_RCU(pss->block, next);
         if (!pss->block) {
-            if (!migrate_multifd_flush_after_each_section()) {
+            if (migrate_multifd() &&
+                !migrate_multifd_flush_after_each_section()) {
                 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
                 int ret = multifd_send_sync_main(f);
                 if (ret < 0) {
@@ -3072,7 +3073,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
         return ret;
     }
 
-    if (!migrate_multifd_flush_after_each_section()) {
+    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
         qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
     }
 
@@ -3184,7 +3185,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 out:
     if (ret >= 0
         && migration_is_setup_or_active(migrate_get_current()->state)) {
-        if (migrate_multifd_flush_after_each_section()) {
+        if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
             ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
             if (ret < 0) {
                 return ret;
@@ -3261,7 +3262,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         return ret;
     }
 
-    if (!migrate_multifd_flush_after_each_section()) {
+    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
         qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
     }
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
@@ -3768,7 +3769,7 @@ int ram_load_postcopy(QEMUFile *f, int channel)
             break;
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
-            if (migrate_multifd_flush_after_each_section()) {
+            if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
                 multifd_recv_sync_main();
             }
             break;
@@ -4046,7 +4047,8 @@ static int ram_load_precopy(QEMUFile *f)
             break;
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
-            if (migrate_multifd_flush_after_each_section()) {
+            if (migrate_multifd() &&
+                migrate_multifd_flush_after_each_section()) {
                 multifd_recv_sync_main();
             }
             break;
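
As background on why a stray flush marker matters on the wire: each
record in the RAM stream starts with a be64 whose low bits carry flags,
so RAM_SAVE_FLAG_MULTIFD_FLUSH arrives in-band, and the destination
reacts by syncing multifd receive channels that a non-multifd (e.g.
RDMA) destination never created. A rough sketch of the decode path,
heavily simplified from ram_load_precopy() (most flags and all error
handling omitted):

    /* Simplified sketch of the stream decode in ram_load_precopy();
     * the real loop handles many more flags and all the errors. */
    uint64_t addr = qemu_get_be64(f);           /* one be64 per record */
    uint64_t flags = addr & ~TARGET_PAGE_MASK;  /* low bits are flags  */
    addr &= TARGET_PAGE_MASK;                   /* rest: guest address */

    switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
    case RAM_SAVE_FLAG_MULTIFD_FLUSH:
        /* The source finished a full round of memory: sync all multifd
         * receive channels.  This is why the source must never emit
         * the flag unless multifd is actually in use. */
        multifd_recv_sync_main();
        break;
    }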