diff mbox series

[v2,2/4] multifd: document packet_len, next_packet_size

Message ID 20231011184358.97349-3-elena.ufimtseva@oracle.com (mailing list archive)
State New, archived
Headers show
Series multifd: various fixes | expand

Commit Message

Elena Ufimtseva Oct. 11, 2023, 6:43 p.m. UTC
The next_packet_size name is a bit misleading, so add more comments
where it is defined.
We send data in two chunks in multifd thread:
 - send the packet with normal (non-zero) guest page offsets that are
   dirty.
   This uses the packet_len, and we increment the number of packets
   sent for this thread;
 - send the normal (non-zero) guest dirty pages themselves in iovs.
   The total size of the data pointed by all iovs for this chunk
   is next_packet_size. We do not increment the packet_num for this
   thread when sending actual pages;

When compression is enabled, next_packet_size is used to indicate
the size of the compressed buffer on source and destination.

Would it be helpful to rename it to data_size or dirty_data_size?

Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
---
 migration/multifd.h | 35 ++++++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)

Comments

Fabiano Rosas Oct. 11, 2023, 7:04 p.m. UTC | #1
Elena Ufimtseva <elena.ufimtseva@oracle.com> writes:

> next_packet_size name is a bit misleading, so add more comments
> where its defined.
> We send data in two chunks in multifd thread:
>  - send the packet with normal (non-zero) guest pages offsets that are
>    dirty.
>    This uses the packet_len and we increment number of packets
>    for this thread that are sent;
>  - send the normal (non-zero) guest dirty pages themselves in iovs.
>    The total size of the data pointed by all iovs for this chunk
>    is next_packet_size. We do not increment the packet_num for this
>    thread when sending actual pages;
>
> When compression is enabled, next_packet_size is used to indicate
> the size of the compressed buffer on source and destination.
>
> Will be it helpful to rename it as data_size or dirty_data_size?
>
> Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
> ---
>  migration/multifd.h | 35 ++++++++++++++++++++++++++++++-----
>  1 file changed, 30 insertions(+), 5 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index a835643b48..37da9b68c2 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -45,7 +45,13 @@ typedef struct {
>      uint32_t pages_alloc;
>      /* non zero pages */
>      uint32_t normal_pages;
> -    /* size of the next packet that contains pages */
> +    /*
> +     * amount of data to be sent to the destination
> +     * that is calculated as
> +     *  - number of the normal guest dirty pages * page_size in non
> +     *    compression case;
> +     *  - equals of the compressed data size to be received;
> +     */
>      uint32_t next_packet_size;

So maybe "payload_size"? This one, not the "next".

Let's see what other people think, but I'm in favor of just renaming
instead of documenting. Later on the maths change and the comment might
become off-sync with the code.
diff mbox series

Patch

diff --git a/migration/multifd.h b/migration/multifd.h
index a835643b48..37da9b68c2 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -45,7 +45,13 @@  typedef struct {
     uint32_t pages_alloc;
     /* non zero pages */
     uint32_t normal_pages;
-    /* size of the next packet that contains pages */
+    /*
+     * amount of data to be sent to the destination
+     * that is calculated as
+     *  - number of the normal guest dirty pages * page_size in non
+     *    compression case;
+     *  - equals of the compressed data size to be received;
+     */
     uint32_t next_packet_size;
     uint64_t packet_num;
     uint64_t unused[4];    /* Reserved for future use */
@@ -79,11 +85,18 @@  typedef struct {
     QIOChannel *c;
     /* is the yank function registered */
     bool registered_yank;
-    /* packet allocated len */
+    /*
+     * allocated length of a packet to be transferred.
+     * It has a size of MultiFDPacket struct plus
+     * the size of the array of guest page offsets (page_count * page_size).
+     */
     uint32_t packet_len;
     /* guest page size */
     uint32_t page_size;
-    /* number of pages in a full packet */
+    /*
+     * maximum number of dirty pages in a full packet calculated as
+     * MULTIFD_PACKET_SIZE / qemu_target_page_size()
+     */
     uint32_t page_count;
     /* multifd flags for sending ram */
     int write_flags;
@@ -116,7 +129,13 @@  typedef struct {
 
     /* pointer to the packet */
     MultiFDPacket_t *packet;
-    /* size of the next packet that contains pages */
+    /*
+     * amount of data to be sent to the destination
+     * that is calculated as
+     *  - number of the normal guest dirty pages * page_size in non
+     *    compression case;
+     *  - equals of the compressed data size to be received;
+     */
     uint32_t next_packet_size;
     /* packets sent through this channel */
     uint64_t num_packets;
@@ -171,7 +190,13 @@  typedef struct {
 
     /* pointer to the packet */
     MultiFDPacket_t *packet;
-    /* size of the next packet that contains pages */
+    /*
+     * amount of data to be received by the destination
+     * that is calculated as
+     *  - number of the normal guest dirty pages * page_size in non
+     *    compression case;
+     *  - equals of the compressed data size to be received;
+     */
     uint32_t next_packet_size;
     /* packets sent through this channel */
     uint64_t num_packets;