diff mbox series

[v2,1/6] migration/multifd: move Params update and pages cleanup into multifd_send_fill_packet()

Message ID 20191026004520.5515-2-richardw.yang@linux.intel.com (mailing list archive)
State New, archived
Headers show
Series migration/multifd: a new mechanism for send thread sync | expand

Commit Message

Wei Yang Oct. 26, 2019, 12:45 a.m. UTC
Fill the packet data and update/clean up the related fields in one place.
This also makes the code a little cleaner.

Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
---
 migration/ram.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

Comments

Juan Quintela Nov. 19, 2019, 10:57 a.m. UTC | #1
Wei Yang <richardw.yang@linux.intel.com> wrote:
> Fill data and update/cleanup related field in one place. Also make the
> code a little clean.
>
> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>

Reviewed-by: Juan Quintela <quintela@redhat.com>

right cleanup.
Wei Yang Nov. 29, 2019, 8:30 a.m. UTC | #2
On Tue, Nov 19, 2019 at 11:57:22AM +0100, Juan Quintela wrote:
>Wei Yang <richardw.yang@linux.intel.com> wrote:
>> Fill data and update/cleanup related field in one place. Also make the
>> code a little clean.
>>
>> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
>
>Reviewed-by: Juan Quintela <quintela@redhat.com>
>
>right cleanup.
>

Hi, Juan

Do you have other comments on the following patches?
diff mbox series

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 5876054195..35f147388b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -789,15 +789,16 @@  static void multifd_pages_clear(MultiFDPages_t *pages)
     g_free(pages);
 }
 
-static void multifd_send_fill_packet(MultiFDSendParams *p)
+static void multifd_send_fill_packet(MultiFDSendParams *p, uint32_t used)
 {
     MultiFDPacket_t *packet = p->packet;
+    uint32_t next_packet_size = used * qemu_target_page_size();
     int i;
 
     packet->flags = cpu_to_be32(p->flags);
     packet->pages_alloc = cpu_to_be32(p->pages->allocated);
     packet->pages_used = cpu_to_be32(p->pages->used);
-    packet->next_packet_size = cpu_to_be32(p->next_packet_size);
+    packet->next_packet_size = cpu_to_be32(next_packet_size);
     packet->packet_num = cpu_to_be64(p->packet_num);
 
     if (p->pages->block) {
@@ -807,6 +808,13 @@  static void multifd_send_fill_packet(MultiFDSendParams *p)
     for (i = 0; i < p->pages->used; i++) {
         packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
     }
+
+    p->next_packet_size = next_packet_size;
+    p->flags = 0;
+    p->num_packets++;
+    p->num_pages += used;
+    p->pages->used = 0;
+    p->pages->block = NULL;
 }
 
 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
@@ -1109,13 +1117,7 @@  static void *multifd_send_thread(void *opaque)
             uint64_t packet_num = p->packet_num;
             flags = p->flags;
 
-            p->next_packet_size = used * qemu_target_page_size();
-            multifd_send_fill_packet(p);
-            p->flags = 0;
-            p->num_packets++;
-            p->num_pages += used;
-            p->pages->used = 0;
-            p->pages->block = NULL;
+            multifd_send_fill_packet(p, used);
             qemu_mutex_unlock(&p->mutex);
 
             trace_multifd_send(p->id, packet_num, used, flags,