
[v3,06/10] migration/dirtyrate: Compare page hash results for recorded sampled page

Message ID 1597634433-18809-7-git-send-email-zhengchuan@huawei.com (mailing list archive)
State New, archived
Series *** A Method for evaluating dirty page rate ***

Commit Message

Zheng Chuan Aug. 17, 2020, 3:20 a.m. UTC
Compare page hash results for recorded sampled page.

Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
---
 migration/dirtyrate.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

Comments

Dr. David Alan Gilbert Aug. 20, 2020, 5:36 p.m. UTC | #1
* Chuan Zheng (zhengchuan@huawei.com) wrote:
> Compare page hash results for recorded sampled page.
> 
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> ---
>  migration/dirtyrate.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 76 insertions(+)
> 
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index 62b6f69..3ce25f5 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -215,6 +215,82 @@ static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
>      return 0;
>  }
>  
> +static int calc_page_dirty_rate(struct RamblockDirtyInfo *info)
> +{
> +    uint8_t *md = NULL;
> +    int i;
> +    int ret = 0;
> +
> +    md = g_try_new0(uint8_t, qcrypto_hash_len);
> +    if (!md) {
> +        return -1;
> +    }

As previously asked, isn't this a nice small simple fixed length - no
need to allocate it?

> +
> +    for (i = 0; i < info->sample_pages_count; i++) {
> +        ret = get_ramblock_vfn_hash(info, info->sample_page_vfn[i], &md);
> +        if (ret < 0) {
> +            goto out;
> +        }
> +
> +        if (memcmp(md, info->hash_result + i * qcrypto_hash_len,
> +                   qcrypto_hash_len) != 0) {
> +            info->sample_dirty_count++;
> +        }
> +    }
> +
> +out:
> +    g_free(md);
> +    return ret;
> +}
> +
> +static bool find_page_matched(RAMBlock *block, struct RamblockDirtyInfo *infos,
> +                              int count, struct RamblockDirtyInfo **matched)
> +{
> +    int i;
> +
> +    for (i = 0; i < count; i++) {
> +        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
> +            break;
> +        }
> +    }
> +
> +    if (i == count) {
> +        return false;
> +    }
> +
> +    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
> +        infos[i].ramblock_pages !=
> +            (qemu_ram_get_used_length(block) >> 12)) {
> +        return false;

I previously asked how this happens.
Also this was DIRTYRATE_PAGE_SIZE_SHIFT

> +    }
> +
> +    *matched = &infos[i];
> +    return true;
> +}
> +
> +static int compare_page_hash_info(struct RamblockDirtyInfo *info,
> +                                  int block_index)
> +{
> +    struct RamblockDirtyInfo *block_dinfo = NULL;
> +    RAMBlock *block = NULL;
> +
> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> +        block_dinfo = NULL;

So you've removed the selection of only some RAMBlocks now?

> +        if (!find_page_matched(block, info, block_index + 1, &block_dinfo)) {
> +            continue;
> +        }
> +        if (calc_page_dirty_rate(block_dinfo) < 0) {
> +            return -1;
> +        }
> +        update_dirtyrate_stat(block_dinfo);
> +    }
> +    if (!dirty_stat.total_sample_count) {
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
>  static void calculate_dirtyrate(struct DirtyRateConfig config)
>  {
>      /* todo */
> -- 
> 1.8.3.1
>
Zheng Chuan Aug. 21, 2020, 12:01 p.m. UTC | #2
On 2020/8/21 1:36, Dr. David Alan Gilbert wrote:
> * Chuan Zheng (zhengchuan@huawei.com) wrote:
>> Compare page hash results for recorded sampled page.
>>
>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
>> ---
>>  migration/dirtyrate.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 76 insertions(+)
>>
>> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
>> index 62b6f69..3ce25f5 100644
>> --- a/migration/dirtyrate.c
>> +++ b/migration/dirtyrate.c
>> @@ -215,6 +215,82 @@ static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
>>      return 0;
>>  }
>>  
>> +static int calc_page_dirty_rate(struct RamblockDirtyInfo *info)
>> +{
>> +    uint8_t *md = NULL;
>> +    int i;
>> +    int ret = 0;
>> +
>> +    md = g_try_new0(uint8_t, qcrypto_hash_len);
>> +    if (!md) {
>> +        return -1;
>> +    }
> 
> As previously asked, isn't this a nice small simple fixed length - no
> need to allocate it?
> 
Yes, it could use QCRYPTO_HASH_LEN to define an array.
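
Something like this minimal standalone sketch (QCRYPTO_HASH_LEN here is an
assumed 16-byte digest length for illustration, and a dummy fill stands in
for the qcrypto call - this is not the patch itself):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QCRYPTO_HASH_LEN 16   /* assumed digest length, for illustration only */

/* Stand-in for get_ramblock_vfn_hash(): fill 'md' with a fake digest. */
static void fake_vfn_hash(uint64_t vfn, uint8_t md[QCRYPTO_HASH_LEN])
{
    memset(md, (int)(vfn & 0xff), QCRYPTO_HASH_LEN);
}

int main(void)
{
    uint8_t recorded[QCRYPTO_HASH_LEN];
    uint8_t md[QCRYPTO_HASH_LEN];    /* fixed size: no g_try_new0()/g_free() */
    unsigned sample_dirty_count = 0;

    fake_vfn_hash(1, recorded);      /* digest recorded at sampling time */
    fake_vfn_hash(2, md);            /* digest recomputed at compare time */

    if (memcmp(md, recorded, QCRYPTO_HASH_LEN) != 0) {
        sample_dirty_count++;
    }
    printf("sample_dirty_count = %u\n", sample_dirty_count);
    return 0;
}
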
>> +
>> +    for (i = 0; i < info->sample_pages_count; i++) {
>> +        ret = get_ramblock_vfn_hash(info, info->sample_page_vfn[i], &md);
>> +        if (ret < 0) {
>> +            goto out;
>> +        }
>> +
>> +        if (memcmp(md, info->hash_result + i * qcrypto_hash_len,
>> +                   qcrypto_hash_len) != 0) {
>> +            info->sample_dirty_count++;
>> +        }
>> +    }
>> +
>> +out:
>> +    g_free(md);
>> +    return ret;
>> +}
>> +
>> +static bool find_page_matched(RAMBlock *block, struct RamblockDirtyInfo *infos,
>> +                              int count, struct RamblockDirtyInfo **matched)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < count; i++) {
>> +        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
>> +            break;
>> +        }
>> +    }
>> +
>> +    if (i == count) {
>> +        return false;
>> +    }
>> +
>> +    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
>> +        infos[i].ramblock_pages !=
>> +            (qemu_ram_get_used_length(block) >> 12)) {
>> +        return false;
> 
> I previously asked how this happens.
> Also this was DIRTYRATE_PAGE_SIZE_SHIFT
> 
Here, we want to find the same ramblock that we sampled before.
We just ignore the ramblock if its HVA address or size has changed due to memory hot-plug during the measurement.

>> +    }
>> +
>> +    *matched = &infos[i];
>> +    return true;
>> +}
>> +
>> +static int compare_page_hash_info(struct RamblockDirtyInfo *info,
>> +                                  int block_index)
>> +{
>> +    struct RamblockDirtyInfo *block_dinfo = NULL;
>> +    RAMBlock *block = NULL;
>> +
>> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
>> +        block_dinfo = NULL;
> 
> So you've removed the selection of only some RAMBlocks now?
> 
In the next patch :), I add functions to skip sampling of some ramblocks.
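
Roughly, the idea is a predicate that filters out blocks we do not want to
sample. A hypothetical standalone sketch (the names and the size threshold
are illustrative only, not the actual next patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical minimum size below which a block would not be sampled. */
#define MIN_RAMBLOCK_SIZE (128u << 10)   /* 128KiB, illustrative only */

struct toy_block {
    const char *idstr;
    uint64_t used_length;
};

/* Predicate deciding whether to skip a block when sampling. */
static bool skip_sample_ramblock(const struct toy_block *block)
{
    return block->used_length < MIN_RAMBLOCK_SIZE;
}

int main(void)
{
    struct toy_block blocks[] = {
        { "pc.ram",   512u << 20 },
        { "tiny.rom",   4u << 10 },
    };

    for (unsigned i = 0; i < 2; i++) {
        printf("%s: %s\n", blocks[i].idstr,
               skip_sample_ramblock(&blocks[i]) ? "skipped" : "sampled");
    }
    return 0;
}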

>> +        if (!find_page_matched(block, info, block_index + 1, &block_dinfo)) {
>> +            continue;
>> +        }
>> +        if (calc_page_dirty_rate(block_dinfo) < 0) {
>> +            return -1;
>> +        }
>> +        update_dirtyrate_stat(block_dinfo);
>> +    }
>> +    if (!dirty_stat.total_sample_count) {
>> +        return -1;
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>>  static void calculate_dirtyrate(struct DirtyRateConfig config)
>>  {
>>      /* todo */
>> -- 
>> 1.8.3.1
>>
Dr. David Alan Gilbert Aug. 21, 2020, 12:12 p.m. UTC | #3
* Zheng Chuan (zhengchuan@huawei.com) wrote:
> 
> 
> On 2020/8/21 1:36, Dr. David Alan Gilbert wrote:
> > * Chuan Zheng (zhengchuan@huawei.com) wrote:
> >> Compare page hash results for recorded sampled page.
> >>
> >> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> >> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> >> ---
> >>  migration/dirtyrate.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++
> >>  1 file changed, 76 insertions(+)
> >>
> >> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> >> index 62b6f69..3ce25f5 100644
> >> --- a/migration/dirtyrate.c
> >> +++ b/migration/dirtyrate.c
> >> @@ -215,6 +215,82 @@ static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> >>      return 0;
> >>  }
> >>  
> >> +static int calc_page_dirty_rate(struct RamblockDirtyInfo *info)
> >> +{
> >> +    uint8_t *md = NULL;
> >> +    int i;
> >> +    int ret = 0;
> >> +
> >> +    md = g_try_new0(uint8_t, qcrypto_hash_len);
> >> +    if (!md) {
> >> +        return -1;
> >> +    }
> > 
> > As previously asked, isn't this a nice small simple fixed length - no
> > need to allocate it?
> > 
> Yes, it could use QCRYPTO_HASH_LEN to define an array.
> >> +
> >> +    for (i = 0; i < info->sample_pages_count; i++) {
> >> +        ret = get_ramblock_vfn_hash(info, info->sample_page_vfn[i], &md);
> >> +        if (ret < 0) {
> >> +            goto out;
> >> +        }
> >> +
> >> +        if (memcmp(md, info->hash_result + i * qcrypto_hash_len,
> >> +                   qcrypto_hash_len) != 0) {
> >> +            info->sample_dirty_count++;
> >> +        }
> >> +    }
> >> +
> >> +out:
> >> +    g_free(md);
> >> +    return ret;
> >> +}
> >> +
> >> +static bool find_page_matched(RAMBlock *block, struct RamblockDirtyInfo *infos,
> >> +                              int count, struct RamblockDirtyInfo **matched)
> >> +{
> >> +    int i;
> >> +
> >> +    for (i = 0; i < count; i++) {
> >> +        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
> >> +            break;
> >> +        }
> >> +    }
> >> +
> >> +    if (i == count) {
> >> +        return false;
> >> +    }
> >> +
> >> +    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
> >> +        infos[i].ramblock_pages !=
> >> +            (qemu_ram_get_used_length(block) >> 12)) {
> >> +        return false;
> > 
> > I previously asked how this happens.
> > Also this was DIRTYRATE_PAGE_SIZE_SHIFT
> > 
> Here, we want to find the same ramblock that we sampled before.
> We just ignore the ramblock if its HVA address or size has changed due to memory hot-plug during the measurement.

OK, just try to make the code consistent between '12',
'DIRTYRATE_PAGE_SIZE_SHIFT', or TARGET_PAGE_SIZE/SHIFT - pick one and use it
throughout.
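
For example, a minimal standalone sketch of the named-constant form
(DIRTYRATE_PAGE_SIZE_SHIFT is assumed to carry whatever value the earlier
patch defines; 12 assumes 4KiB pages):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DIRTYRATE_PAGE_SIZE_SHIFT 12   /* assumed: 4KiB sampling granularity */

int main(void)
{
    uint64_t used_length = 512ull << 20;   /* e.g. a 512MiB ramblock */
    uint64_t ramblock_pages = used_length >> DIRTYRATE_PAGE_SIZE_SHIFT;

    printf("ramblock_pages = %" PRIu64 "\n", ramblock_pages);
    return 0;
}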

Dave

> >> +    }
> >> +
> >> +    *matched = &infos[i];
> >> +    return true;
> >> +}
> >> +
> >> +static int compare_page_hash_info(struct RamblockDirtyInfo *info,
> >> +                                  int block_index)
> >> +{
> >> +    struct RamblockDirtyInfo *block_dinfo = NULL;
> >> +    RAMBlock *block = NULL;
> >> +
> >> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> >> +        block_dinfo = NULL;
> > 
> > So you've removed the selection of only some RAMBlocks now?
> > 
> In the next patch :), I add functions to skip sampling of some ramblocks.
> 
> >> +        if (!find_page_matched(block, info, block_index + 1, &block_dinfo)) {
> >> +            continue;
> >> +        }
> >> +        if (calc_page_dirty_rate(block_dinfo) < 0) {
> >> +            return -1;
> >> +        }
> >> +        update_dirtyrate_stat(block_dinfo);
> >> +    }
> >> +    if (!dirty_stat.total_sample_count) {
> >> +        return -1;
> >> +    }
> >> +
> >> +    return 0;
> >> +}
> >> +
> >>  static void calculate_dirtyrate(struct DirtyRateConfig config)
> >>  {
> >>      /* todo */
> >> -- 
> >> 1.8.3.1
> >>
>

Patch

diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 62b6f69..3ce25f5 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -215,6 +215,82 @@  static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
     return 0;
 }
 
+static int calc_page_dirty_rate(struct RamblockDirtyInfo *info)
+{
+    uint8_t *md = NULL;
+    int i;
+    int ret = 0;
+
+    md = g_try_new0(uint8_t, qcrypto_hash_len);
+    if (!md) {
+        return -1;
+    }
+
+    for (i = 0; i < info->sample_pages_count; i++) {
+        ret = get_ramblock_vfn_hash(info, info->sample_page_vfn[i], &md);
+        if (ret < 0) {
+            goto out;
+        }
+
+        if (memcmp(md, info->hash_result + i * qcrypto_hash_len,
+                   qcrypto_hash_len) != 0) {
+            info->sample_dirty_count++;
+        }
+    }
+
+out:
+    g_free(md);
+    return ret;
+}
+
+static bool find_page_matched(RAMBlock *block, struct RamblockDirtyInfo *infos,
+                              int count, struct RamblockDirtyInfo **matched)
+{
+    int i;
+
+    for (i = 0; i < count; i++) {
+        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
+            break;
+        }
+    }
+
+    if (i == count) {
+        return false;
+    }
+
+    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
+        infos[i].ramblock_pages !=
+            (qemu_ram_get_used_length(block) >> 12)) {
+        return false;
+    }
+
+    *matched = &infos[i];
+    return true;
+}
+
+static int compare_page_hash_info(struct RamblockDirtyInfo *info,
+                                  int block_index)
+{
+    struct RamblockDirtyInfo *block_dinfo = NULL;
+    RAMBlock *block = NULL;
+
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+        block_dinfo = NULL;
+        if (!find_page_matched(block, info, block_index + 1, &block_dinfo)) {
+            continue;
+        }
+        if (calc_page_dirty_rate(block_dinfo) < 0) {
+            return -1;
+        }
+        update_dirtyrate_stat(block_dinfo);
+    }
+    if (!dirty_stat.total_sample_count) {
+        return -1;
+    }
+
+    return 0;
+}
+
 static void calculate_dirtyrate(struct DirtyRateConfig config)
 {
     /* todo */
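
For context, the sampling idea this patch completes can be illustrated with a
minimal standalone program (a toy FNV-1a digest stands in for the qcrypto
hash used by the series; none of the code below is QEMU code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12                  /* assumes 4KiB sampling granularity */
#define PAGE_SIZE    (1u << PAGE_SHIFT)
#define TOTAL_PAGES  64
#define SAMPLES      8

/* Toy 64-bit FNV-1a digest of one page; stands in for the qcrypto hash. */
static uint64_t page_hash(const uint8_t *page)
{
    uint64_t h = 0xcbf29ce484222325ull;
    for (uint32_t i = 0; i < PAGE_SIZE; i++) {
        h = (h ^ page[i]) * 0x100000001b3ull;
    }
    return h;
}

int main(void)
{
    static uint8_t ram[TOTAL_PAGES * PAGE_SIZE];   /* pretend guest RAM */
    uint64_t sample_vfn[SAMPLES];
    uint64_t recorded[SAMPLES];
    unsigned dirty = 0;

    /* Record hashes of a few sampled page frames. */
    for (int i = 0; i < SAMPLES; i++) {
        sample_vfn[i] = (uint64_t)i * (TOTAL_PAGES / SAMPLES);
        recorded[i] = page_hash(&ram[sample_vfn[i] << PAGE_SHIFT]);
    }

    /* "Guest" writes happen during the measurement window. */
    ram[sample_vfn[1] << PAGE_SHIFT] ^= 0xff;
    ram[sample_vfn[5] << PAGE_SHIFT] ^= 0xff;

    /* Re-hash and compare, as calc_page_dirty_rate() does with memcmp(). */
    for (int i = 0; i < SAMPLES; i++) {
        if (page_hash(&ram[sample_vfn[i] << PAGE_SHIFT]) != recorded[i]) {
            dirty++;
        }
    }

    printf("dirty %u of %u sampled pages (%.1f%%)\n",
           dirty, (unsigned)SAMPLES, 100.0 * dirty / SAMPLES);
    return 0;
}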