diff mbox series

[v2,01/11] dump: Cleanup memblock usage

Message ID 20220713130322.25517-2-frankja@linux.ibm.com (mailing list archive)
State New, archived
Headers show
Series dump: Add arch section and s390x PV dump | expand

Commit Message

Janosch Frank July 13, 2022, 1:03 p.m. UTC
The iteration over the memblocks is hard to understand so it's about
time to clean it up.

struct DumpState's next_block and start members can and should be
local variables within the iterator.

Instead of manually grabbing the next memblock we can use
QTAILQ_FOREACH to iterate over all memblocks.

The begin and length fields in the DumpState have been left untouched
since the qmp arguments share their names.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
---
 dump/dump.c           | 91 +++++++++++--------------------------------
 include/sysemu/dump.h | 47 +++++++++++++++++++---
 2 files changed, 65 insertions(+), 73 deletions(-)

Comments

Marc-André Lureau July 13, 2022, 3:09 p.m. UTC | #1
Hi

On Wed, Jul 13, 2022 at 5:07 PM Janosch Frank <frankja@linux.ibm.com> wrote:
>
> The iteration over the memblocks is hard to understand so it's about
> time to clean it up.
>
> struct DumpState's next_block and start members can and should be
> local variables within the iterator.
>
> Instead of manually grabbing the next memblock we can use
> QTAILQ_FOREACH to iterate over all memblocks.
>
> The begin and length fields in the DumpState have been left untouched
> since the qmp arguments share their names.
>
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>

After this patch:
./qemu-system-x86_64 -monitor stdio -S
(qemu) dump-guest-memory foo
Error: dump: failed to save memory: Bad address


> ---
>  dump/dump.c           | 91 +++++++++++--------------------------------
>  include/sysemu/dump.h | 47 +++++++++++++++++++---
>  2 files changed, 65 insertions(+), 73 deletions(-)
>
> diff --git a/dump/dump.c b/dump/dump.c
> index 4d9658ffa2..6feba3cbfa 100644
> --- a/dump/dump.c
> +++ b/dump/dump.c
> @@ -591,56 +591,27 @@ static void dump_begin(DumpState *s, Error **errp)
>      write_elf_notes(s, errp);
>  }
>
> -static int get_next_block(DumpState *s, GuestPhysBlock *block)
> -{
> -    while (1) {
> -        block = QTAILQ_NEXT(block, next);
> -        if (!block) {
> -            /* no more block */
> -            return 1;
> -        }
> -
> -        s->start = 0;
> -        s->next_block = block;
> -        if (s->has_filter) {
> -            if (block->target_start >= s->begin + s->length ||
> -                block->target_end <= s->begin) {
> -                /* This block is out of the range */
> -                continue;
> -            }
> -
> -            if (s->begin > block->target_start) {
> -                s->start = s->begin - block->target_start;
> -            }
> -        }
> -
> -        return 0;
> -    }
> -}
> -
>  /* write all memory to vmcore */
>  static void dump_iterate(DumpState *s, Error **errp)
>  {
>      ERRP_GUARD();
>      GuestPhysBlock *block;
> -    int64_t size;
> +    int64_t memblock_size, memblock_start;
>
> -    do {
> -        block = s->next_block;
> -
> -        size = block->target_end - block->target_start;
> -        if (s->has_filter) {
> -            size -= s->start;
> -            if (s->begin + s->length < block->target_end) {
> -                size -= block->target_end - (s->begin + s->length);
> -            }
> +    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
> +        memblock_start = dump_get_memblock_start(block, s->begin, s->length);
> +        if (memblock_start == -1) {
> +            continue;
>          }
> -        write_memory(s, block, s->start, size, errp);
> +
> +        memblock_size = dump_get_memblock_size(block, s->begin, s->length);
> +
> +        /* Write the memory to file */
> +        write_memory(s, block, memblock_start, memblock_size, errp);
>          if (*errp) {
>              return;
>          }
> -
> -    } while (!get_next_block(s, block));
> +    }
>  }
>
>  static void create_vmcore(DumpState *s, Error **errp)
> @@ -1490,30 +1461,22 @@ static void create_kdump_vmcore(DumpState *s, Error **errp)
>      }
>  }
>
> -static ram_addr_t get_start_block(DumpState *s)
> +static int validate_start_block(DumpState *s)
>  {
>      GuestPhysBlock *block;
>
>      if (!s->has_filter) {
> -        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
>          return 0;
>      }
>
>      QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
> +        /* This block is out of the range */
>          if (block->target_start >= s->begin + s->length ||
>              block->target_end <= s->begin) {
> -            /* This block is out of the range */
>              continue;
>          }
> -
> -        s->next_block = block;
> -        if (s->begin > block->target_start) {
> -            s->start = s->begin - block->target_start;
> -        } else {
> -            s->start = 0;
> -        }
> -        return s->start;
> -    }
> +        return 0;
> +   }
>
>      return -1;
>  }
> @@ -1540,25 +1503,17 @@ bool qemu_system_dump_in_progress(void)
>      return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
>  }
>
> -/* calculate total size of memory to be dumped (taking filter into
> - * acoount.) */
> +/*
> + * calculate total size of memory to be dumped (taking filter into
> + * account.)

thanks for fixing the typo

> + */
>  static int64_t dump_calculate_size(DumpState *s)
>  {
>      GuestPhysBlock *block;
> -    int64_t size = 0, total = 0, left = 0, right = 0;
> +    int64_t total = 0;
>
>      QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
> -        if (s->has_filter) {
> -            /* calculate the overlapped region. */
> -            left = MAX(s->begin, block->target_start);
> -            right = MIN(s->begin + s->length, block->target_end);
> -            size = right - left;
> -            size = size > 0 ? size : 0;
> -        } else {
> -            /* count the whole region in */
> -            size = (block->target_end - block->target_start);
> -        }
> -        total += size;
> +        total += dump_get_memblock_size(block, s->begin, s->length);
>      }
>
>      return total;
> @@ -1660,8 +1615,8 @@ static void dump_init(DumpState *s, int fd, bool has_format,
>          goto cleanup;
>      }
>
> -    s->start = get_start_block(s);
> -    if (s->start == -1) {
> +    /* Is the filter filtering everything? */
> +    if (validate_start_block(s) == -1) {
>          error_setg(errp, QERR_INVALID_PARAMETER, "begin");
>          goto cleanup;
>      }
> diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
> index ffc2ea1072..f3bf98c220 100644
> --- a/include/sysemu/dump.h
> +++ b/include/sysemu/dump.h
> @@ -166,11 +166,10 @@ typedef struct DumpState {
>      hwaddr memory_offset;
>      int fd;
>
> -    GuestPhysBlock *next_block;
> -    ram_addr_t start;
> -    bool has_filter;
> -    int64_t begin;
> -    int64_t length;
> +    /* Guest memory related data */
> +    bool has_filter;           /* Are we dumping parts of the memory? */
> +    int64_t begin;             /* Start address of the chunk we want to dump */
> +    int64_t length;            /* Length of the dump we want to dump */
>
>      uint8_t *note_buf;          /* buffer for notes */
>      size_t note_buf_offset;     /* the writing place in note_buf */
> @@ -203,4 +202,42 @@ typedef struct DumpState {
>  uint16_t cpu_to_dump16(DumpState *s, uint16_t val);
>  uint32_t cpu_to_dump32(DumpState *s, uint32_t val);
>  uint64_t cpu_to_dump64(DumpState *s, uint64_t val);
> +
> +static inline int64_t dump_get_memblock_size(GuestPhysBlock *block, int64_t filter_area_start,
> +                                             int64_t filter_area_length)
> +{
> +    int64_t size, left, right;
> +
> +    /* No filter, return full size */
> +    if (!filter_area_length) {
> +        return block->target_end - block->target_start;
> +    }
> +
> +    /* calculate the overlapped region. */
> +    left = MAX(filter_area_start, block->target_start);
> +    right = MIN(filter_area_start + filter_area_length, block->target_end);
> +    size = right - left;
> +    size = size > 0 ? size : 0;
> +
> +    return size;
> +}
> +
> +static inline int64_t dump_get_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
> +                                  int64_t filter_area_length)
> +{
> +    if (filter_area_length) {
> +        /*
> +         * Check if block is within guest memory dump area. If not
> +         * go to next one.
> +         */

Or rather "return -1 if the block is not within filter area"

> +        if (block->target_start >= filter_area_start + filter_area_length ||
> +            block->target_end <= filter_area_start) {
> +            return -1;
> +        }
> +        if (filter_area_start > block->target_start) {
> +            return filter_area_start - block->target_start;
> +        }
> +    }
> +    return block->target_start;

This used to be 0. Changing that, I think the patch looks good.
Although it could perhaps be split to introduce the two functions.

> +}
>  #endif
> --
> 2.34.1
>
Janosch Frank July 13, 2022, 3:30 p.m. UTC | #2
On 7/13/22 17:09, Marc-André Lureau wrote:
> Hi
> 
> On Wed, Jul 13, 2022 at 5:07 PM Janosch Frank <frankja@linux.ibm.com> wrote:
>>
>> The iteration over the memblocks is hard to understand so it's about
>> time to clean it up.
>>
>> struct DumpState's next_block and start members can and should be
>> local variables within the iterator.
>>
>> Instead of manually grabbing the next memblock we can use
>> QTAILQ_FOREACH to iterate over all memblocks.
>>
>> The begin and length fields in the DumpState have been left untouched
>> since the qmp arguments share their names.
>>
>> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> 
> After this patch:
> ./qemu-system-x86_64 -monitor stdio -S
> (qemu) dump-guest-memory foo
> Error: dump: failed to save memory: Bad address

If you have more ways to check for dump errors then please send them to 
me. I'm aware that this might not have been a 100% conversion and I'm a 
bit terrified about the fact that this will affect all architectures.


Anyway, I'll have a look.

[...]

>> +static inline int64_t dump_get_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
>> +                                  int64_t filter_area_length)
>> +{
>> +    if (filter_area_length) {
>> +        /*
>> +         * Check if block is within guest memory dump area. If not
>> +         * go to next one.
>> +         */
> 
> Or rather "return -1 if the block is not within filter area"

Sure

> 
>> +        if (block->target_start >= filter_area_start + filter_area_length ||
>> +            block->target_end <= filter_area_start) {
>> +            return -1;
>> +        }
>> +        if (filter_area_start > block->target_start) {
>> +            return filter_area_start - block->target_start;
>> +        }
>> +    }
>> +    return block->target_start;
> 
> This used to be 0. Changing that, I think the patch looks good.
> Although it could perhaps be split to introduce the two functions.

Yes, but the 0 was used to indicate that we would have needed to continue 
iterating, and the iteration is done via other means in this patch.

Or am I missing something?

> 
>> +}
>>   #endif
>> --
>> 2.34.1
>>
> 
>
Marc-André Lureau July 13, 2022, 3:35 p.m. UTC | #3
Hi

On Wed, Jul 13, 2022 at 7:30 PM Janosch Frank <frankja@linux.ibm.com> wrote:
>
> On 7/13/22 17:09, Marc-André Lureau wrote:
> > Hi
> >
> > On Wed, Jul 13, 2022 at 5:07 PM Janosch Frank <frankja@linux.ibm.com> wrote:
> >>
> >> The iteration over the memblocks is hard to understand so it's about
> >> time to clean it up.
> >>
> >> struct DumpState's next_block and start members can and should be
> >> local variables within the iterator.
> >>
> >> Instead of manually grabbing the next memblock we can use
> >> QTAILQ_FOREACH to iterate over all memblocks.
> >>
> >> The begin and length fields in the DumpState have been left untouched
> >> since the qmp arguments share their names.
> >>
> >> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> >
> > After this patch:
> > ./qemu-system-x86_64 -monitor stdio -S
> > (qemu) dump-guest-memory foo
> > Error: dump: failed to save memory: Bad address
>
> If you have more ways to check for dump errors then please send them to
> me. I'm aware that this might not have been a 100% conversion and I'm a
> bit terrified about the fact that this will affect all architectures.

Same feeling here. Maybe it's about time to write real dump tests!

>
>
> Anyway, I'll have a look.
>
> [...]
>
> >> +static inline int64_t dump_get_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
> >> +                                  int64_t filter_area_length)
> >> +{
> >> +    if (filter_area_length) {
> >> +        /*
> >> +         * Check if block is within guest memory dump area. If not
> >> +         * go to next one.
> >> +         */
> >
> > Or rather "return -1 if the block is not within filter area"
>
> Sure
>
> >
> >> +        if (block->target_start >= filter_area_start + filter_area_length ||
> >> +            block->target_end <= filter_area_start) {
> >> +            return -1;
> >> +        }
> >> +        if (filter_area_start > block->target_start) {
> >> +            return filter_area_start - block->target_start;
> >> +        }
> >> +    }
> >> +    return block->target_start;
> >
> > This used to be 0. Changing that, I think the patch looks good.
> > Although it could perhaps be split to introduce the two functions.
>
> Yes, but the 0 was used to indicate that we would have needed to continue
> iterating, and the iteration is done via other means in this patch.
>
> Or am I missing something?

Well, you changed the way the loop used to work. it used to return 1/0
to indicate stop/continue and rely on s->start / s->next_block. Now
you return memblock_start.

>
> >
> >> +}
> >>   #endif
> >> --
> >> 2.34.1
> >>
> >
> >
>
Janosch Frank July 14, 2022, 9:40 a.m. UTC | #4
On 7/13/22 17:35, Marc-André Lureau wrote:
> Hi
> 
> On Wed, Jul 13, 2022 at 7:30 PM Janosch Frank <frankja@linux.ibm.com> wrote:
>>
>> On 7/13/22 17:09, Marc-André Lureau wrote:
>>> Hi
>>>
>>> On Wed, Jul 13, 2022 at 5:07 PM Janosch Frank <frankja@linux.ibm.com> wrote:
>>>>
>>>> The iteration over the memblocks is hard to understand so it's about
>>>> time to clean it up.
>>>>
>>>> struct DumpState's next_block and start members can and should be
>>>> local variables within the iterator.
>>>>
>>>> Instead of manually grabbing the next memblock we can use
>>>> QTAILQ_FOREACH to iterate over all memblocks.
>>>>
>>>> The begin and length fields in the DumpState have been left untouched
>>>> since the qmp arguments share their names.
>>>>
>>>> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
>>>
>>> After this patch:
>>> ./qemu-system-x86_64 -monitor stdio -S
>>> (qemu) dump-guest-memory foo
>>> Error: dump: failed to save memory: Bad address
>>
>> If you have more ways to check for dump errors then please send them to
>> me. I'm aware that this might not have been a 100% conversion and I'm a
>> bit terrified about the fact that this will affect all architectures.
> 
> Same feeling here. Maybe it's about time to write real dump tests!

We have tests for s390 and I've prompted for tests with filtering so we 
can also cover that. Unfortunately s390 differs in the use of memory 
because we only have one large block which hid this error from me.


>>>
>>>> +        if (block->target_start >= filter_area_start + filter_area_length ||
>>>> +            block->target_end <= filter_area_start) {
>>>> +            return -1;
>>>> +        }
>>>> +        if (filter_area_start > block->target_start) {
>>>> +            return filter_area_start - block->target_start;
>>>> +        }
>>>> +    }
>>>> +    return block->target_start;
>>>
>>> This used to be 0. Changing that, I think the patch looks good.
>>> Although it could perhaps be split to introduce the two functions.
>>
>> Yes, but the 0 was used to indicate that we would have needed to continue
>> iterating, and the iteration is done via other means in this patch.
>>
>> Or am I missing something?

Had a look, turns out I missed something.

> 
> Well, you changed the way the loop used to work. it used to return 1/0
> to indicate stop/continue and rely on s->start / s->next_block. Now
> you return memblock_start.

Maybe we should call this "dump_get_memblock_start_offset()" to make it 
clearer that we don't return block->target_start, i.e. a start address, 
but rather an offset that we tack onto the host address to read the memory?

> 
>>
>>>
>>>> +}
>>>>    #endif
>>>> --
>>>> 2.34.1
>>>>
>>>
>>>
>>
> 
>
Marc-André Lureau July 15, 2022, 8:34 a.m. UTC | #5
Hi

On Thu, Jul 14, 2022 at 1:46 PM Janosch Frank <frankja@linux.ibm.com> wrote:

> On 7/13/22 17:35, Marc-André Lureau wrote:
> > Hi
> >
> > On Wed, Jul 13, 2022 at 7:30 PM Janosch Frank <frankja@linux.ibm.com>
> wrote:
> >>
> >> On 7/13/22 17:09, Marc-André Lureau wrote:
> >>> Hi
> >>>
> >>> On Wed, Jul 13, 2022 at 5:07 PM Janosch Frank <frankja@linux.ibm.com>
> wrote:
> >>>>
> >>>> The iteration over the memblocks is hard to understand so it's about
> >>>> time to clean it up.
> >>>>
> >>>> struct DumpState's next_block and start members can and should be
> >>>> local variables within the iterator.
> >>>>
> >>>> Instead of manually grabbing the next memblock we can use
> >>>> QTAILQ_FOREACH to iterate over all memblocks.
> >>>>
> >>>> The begin and length fields in the DumpState have been left untouched
> >>>> since the qmp arguments share their names.
> >>>>
> >>>> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> >>>
> >>> After this patch:
> >>> ./qemu-system-x86_64 -monitor stdio -S
> >>> (qemu) dump-guest-memory foo
> >>> Error: dump: failed to save memory: Bad address
> >>
> >> If you have more ways to check for dump errors then please send them to
> >> me. I'm aware that this might not have been a 100% conversion and I'm a
> >> bit terrified about the fact that this will affect all architectures.
> >
> > Same feeling here. Maybe it's about time to write real dump tests!
>
> We have tests for s390 and I've prompted for tests with filtering so we
> can also cover that. Unfortunately s390 differs in the use of memory
> because we only have one large block which hid this error from me.
>
>
> >>>
> >>>> +        if (block->target_start >= filter_area_start +
> filter_area_length ||
> >>>> +            block->target_end <= filter_area_start) {
> >>>> +            return -1;
> >>>> +        }
> >>>> +        if (filter_area_start > block->target_start) {
> >>>> +            return filter_area_start - block->target_start;
> >>>> +        }
> >>>> +    }
> >>>> +    return block->target_start;
> >>>
> >>> This used to be 0. Changing that, I think the patch looks good.
> >>> Although it could perhaps be split to introduce the two functions.
> >>
> >> Yes, but the 0 was used to indicate that we would have needed to continue
> >> iterating, and the iteration is done via other means in this patch.
> >>
> >> Or am I missing something?
>
> Had a look, turns out I missed something.
>
> >
> > Well, you changed the way the loop used to work. it used to return 1/0
> > to indicate stop/continue and rely on s->start / s->next_block. Now
> > you return memblock_start.
>
> Maybe we should call this "dump_get_memblock_start_offset()" to make it
> clearer that we don't return block->target_start, i.e. a start address,
> but rather an offset that we tack onto the host address to read the memory?
>
>
Not a big difference to me. You would need to adjust write_memory() "start"
argument name as well then.


> >
> >>
> >>>
> >>>> +}
> >>>>    #endif
> >>>> --
> >>>> 2.34.1
> >>>>
> >>>
> >>>
> >>
> >
> >
>
>
diff mbox series

Patch

diff --git a/dump/dump.c b/dump/dump.c
index 4d9658ffa2..6feba3cbfa 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -591,56 +591,27 @@  static void dump_begin(DumpState *s, Error **errp)
     write_elf_notes(s, errp);
 }
 
-static int get_next_block(DumpState *s, GuestPhysBlock *block)
-{
-    while (1) {
-        block = QTAILQ_NEXT(block, next);
-        if (!block) {
-            /* no more block */
-            return 1;
-        }
-
-        s->start = 0;
-        s->next_block = block;
-        if (s->has_filter) {
-            if (block->target_start >= s->begin + s->length ||
-                block->target_end <= s->begin) {
-                /* This block is out of the range */
-                continue;
-            }
-
-            if (s->begin > block->target_start) {
-                s->start = s->begin - block->target_start;
-            }
-        }
-
-        return 0;
-    }
-}
-
 /* write all memory to vmcore */
 static void dump_iterate(DumpState *s, Error **errp)
 {
     ERRP_GUARD();
     GuestPhysBlock *block;
-    int64_t size;
+    int64_t memblock_size, memblock_start;
 
-    do {
-        block = s->next_block;
-
-        size = block->target_end - block->target_start;
-        if (s->has_filter) {
-            size -= s->start;
-            if (s->begin + s->length < block->target_end) {
-                size -= block->target_end - (s->begin + s->length);
-            }
+    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
+        memblock_start = dump_get_memblock_start(block, s->begin, s->length);
+        if (memblock_start == -1) {
+            continue;
         }
-        write_memory(s, block, s->start, size, errp);
+
+        memblock_size = dump_get_memblock_size(block, s->begin, s->length);
+
+        /* Write the memory to file */
+        write_memory(s, block, memblock_start, memblock_size, errp);
         if (*errp) {
             return;
         }
-
-    } while (!get_next_block(s, block));
+    }
 }
 
 static void create_vmcore(DumpState *s, Error **errp)
@@ -1490,30 +1461,22 @@  static void create_kdump_vmcore(DumpState *s, Error **errp)
     }
 }
 
-static ram_addr_t get_start_block(DumpState *s)
+static int validate_start_block(DumpState *s)
 {
     GuestPhysBlock *block;
 
     if (!s->has_filter) {
-        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
         return 0;
     }
 
     QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
+        /* This block is out of the range */
         if (block->target_start >= s->begin + s->length ||
             block->target_end <= s->begin) {
-            /* This block is out of the range */
             continue;
         }
-
-        s->next_block = block;
-        if (s->begin > block->target_start) {
-            s->start = s->begin - block->target_start;
-        } else {
-            s->start = 0;
-        }
-        return s->start;
-    }
+        return 0;
+   }
 
     return -1;
 }
@@ -1540,25 +1503,17 @@  bool qemu_system_dump_in_progress(void)
     return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
 }
 
-/* calculate total size of memory to be dumped (taking filter into
- * acoount.) */
+/*
+ * calculate total size of memory to be dumped (taking filter into
+ * account.)
+ */
 static int64_t dump_calculate_size(DumpState *s)
 {
     GuestPhysBlock *block;
-    int64_t size = 0, total = 0, left = 0, right = 0;
+    int64_t total = 0;
 
     QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
-        if (s->has_filter) {
-            /* calculate the overlapped region. */
-            left = MAX(s->begin, block->target_start);
-            right = MIN(s->begin + s->length, block->target_end);
-            size = right - left;
-            size = size > 0 ? size : 0;
-        } else {
-            /* count the whole region in */
-            size = (block->target_end - block->target_start);
-        }
-        total += size;
+        total += dump_get_memblock_size(block, s->begin, s->length);
     }
 
     return total;
@@ -1660,8 +1615,8 @@  static void dump_init(DumpState *s, int fd, bool has_format,
         goto cleanup;
     }
 
-    s->start = get_start_block(s);
-    if (s->start == -1) {
+    /* Is the filter filtering everything? */
+    if (validate_start_block(s) == -1) {
         error_setg(errp, QERR_INVALID_PARAMETER, "begin");
         goto cleanup;
     }
diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
index ffc2ea1072..f3bf98c220 100644
--- a/include/sysemu/dump.h
+++ b/include/sysemu/dump.h
@@ -166,11 +166,10 @@  typedef struct DumpState {
     hwaddr memory_offset;
     int fd;
 
-    GuestPhysBlock *next_block;
-    ram_addr_t start;
-    bool has_filter;
-    int64_t begin;
-    int64_t length;
+    /* Guest memory related data */
+    bool has_filter;           /* Are we dumping parts of the memory? */
+    int64_t begin;             /* Start address of the chunk we want to dump */
+    int64_t length;            /* Length of the dump we want to dump */
 
     uint8_t *note_buf;          /* buffer for notes */
     size_t note_buf_offset;     /* the writing place in note_buf */
@@ -203,4 +202,42 @@  typedef struct DumpState {
 uint16_t cpu_to_dump16(DumpState *s, uint16_t val);
 uint32_t cpu_to_dump32(DumpState *s, uint32_t val);
 uint64_t cpu_to_dump64(DumpState *s, uint64_t val);
+
+static inline int64_t dump_get_memblock_size(GuestPhysBlock *block, int64_t filter_area_start,
+                                             int64_t filter_area_length)
+{
+    int64_t size, left, right;
+
+    /* No filter, return full size */
+    if (!filter_area_length) {
+        return block->target_end - block->target_start;
+    }
+
+    /* calculate the overlapped region. */
+    left = MAX(filter_area_start, block->target_start);
+    right = MIN(filter_area_start + filter_area_length, block->target_end);
+    size = right - left;
+    size = size > 0 ? size : 0;
+
+    return size;
+}
+
+static inline int64_t dump_get_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
+                                  int64_t filter_area_length)
+{
+    if (filter_area_length) {
+        /*
+         * Check if block is within guest memory dump area. If not
+         * go to next one.
+         */
+        if (block->target_start >= filter_area_start + filter_area_length ||
+            block->target_end <= filter_area_start) {
+            return -1;
+        }
+        if (filter_area_start > block->target_start) {
+            return filter_area_start - block->target_start;
+        }
+    }
+    return block->target_start;
+}
 #endif