
[v6,07/12] migration/dirtyrate: Compare page hash results for recorded sampled page

Message ID 1598669577-76914-8-git-send-email-zhengchuan@huawei.com (mailing list archive)
State New, archived
Series *** A Method for evaluating dirty page rate ***

Commit Message

Zheng Chuan Aug. 29, 2020, 2:52 a.m. UTC
Compare page hash results for recorded sampled page.

Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
---
 migration/dirtyrate.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

Comments

David Edmondson Aug. 31, 2020, 9:10 a.m. UTC | #1
On Saturday, 2020-08-29 at 10:52:52 +08, Chuan Zheng wrote:

> Compare page hash results for recorded sampled page.
>
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> ---
>  migration/dirtyrate.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 63 insertions(+)
>
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index f4967fd..9cc2cbb 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -193,6 +193,69 @@ static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
>      return 0;
>  }
>  
> +static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
> +{
> +    uint32_t crc;
> +    int i;
> +
> +    for (i = 0; i < info->sample_pages_count; i++) {
> +        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
> +        if (crc != info->hash_result[i]) {
> +            info->sample_dirty_count++;
> +        }
> +    }
> +}
> +
> +static struct RamblockDirtyInfo *
> +find_page_matched(RAMBlock *block, int count,
> +                  struct RamblockDirtyInfo *infos)
> +{
> +    int i;
> +    struct RamblockDirtyInfo *matched;
> +
> +    for (i = 0; i < count; i++) {
> +        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
> +            break;
> +        }
> +    }
> +
> +    if (i == count) {
> +        return NULL;
> +    }
> +
> +    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
> +        infos[i].ramblock_pages !=
> +            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
> +        return NULL;
> +    }
> +
> +    matched = &infos[i];
> +
> +    return matched;
> +}
> +
> +static int compare_page_hash_info(struct RamblockDirtyInfo *info,
> +                                  int block_index)
> +{
> +    struct RamblockDirtyInfo *block_dinfo = NULL;
> +    RAMBlock *block = NULL;
> +
> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
> +        block_dinfo = find_page_matched(block, block_index + 1, info);
> +        if (block_dinfo == NULL) {
> +            continue;
> +        }
> +        calc_page_dirty_rate(block_dinfo);
> +        update_dirtyrate_stat(block_dinfo);
> +    }
> +
> +    if (!DirtyStat.total_sample_count) {

total_sample_count isn't a boolean or pointer - comparing against 0
would be clearer.
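
For illustration only, the explicit comparison being suggested would read something
like the following (same behaviour, just spelled out against zero):

    if (DirtyStat.total_sample_count == 0) {
        return -1;
    }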

> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
>  static void calculate_dirtyrate(struct DirtyRateConfig config)
>  {
>      /* todo */
> -- 
> 1.8.3.1

dme.
Zheng Chuan Aug. 31, 2020, 11:10 a.m. UTC | #2
On 2020/8/31 17:10, David Edmondson wrote:
> On Saturday, 2020-08-29 at 10:52:52 +08, Chuan Zheng wrote:
> 
>> Compare page hash results for recorded sampled page.
>>
>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
>> ---
>>  migration/dirtyrate.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 63 insertions(+)
>>
>> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
>> index f4967fd..9cc2cbb 100644
>> --- a/migration/dirtyrate.c
>> +++ b/migration/dirtyrate.c
>> @@ -193,6 +193,69 @@ static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
>>      return 0;
>>  }
>>  
>> +static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
>> +{
>> +    uint32_t crc;
>> +    int i;
>> +
>> +    for (i = 0; i < info->sample_pages_count; i++) {
>> +        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
>> +        if (crc != info->hash_result[i]) {
>> +            info->sample_dirty_count++;
>> +        }
>> +    }
>> +}
>> +
>> +static struct RamblockDirtyInfo *
>> +find_page_matched(RAMBlock *block, int count,
>> +                  struct RamblockDirtyInfo *infos)
>> +{
>> +    int i;
>> +    struct RamblockDirtyInfo *matched;
>> +
>> +    for (i = 0; i < count; i++) {
>> +        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
>> +            break;
>> +        }
>> +    }
>> +
>> +    if (i == count) {
>> +        return NULL;
>> +    }
>> +
>> +    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
>> +        infos[i].ramblock_pages !=
>> +            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
>> +        return NULL;
>> +    }
>> +
>> +    matched = &infos[i];
>> +
>> +    return matched;
>> +}
>> +
>> +static int compare_page_hash_info(struct RamblockDirtyInfo *info,
>> +                                  int block_index)
>> +{
>> +    struct RamblockDirtyInfo *block_dinfo = NULL;
>> +    RAMBlock *block = NULL;
>> +
>> +    RAMBLOCK_FOREACH_MIGRATABLE(block) {
>> +        block_dinfo = find_page_matched(block, block_index + 1, info);
>> +        if (block_dinfo == NULL) {
>> +            continue;
>> +        }
>> +        calc_page_dirty_rate(block_dinfo);
>> +        update_dirtyrate_stat(block_dinfo);
>> +    }
>> +
>> +    if (!DirtyStat.total_sample_count) {
> 
> total_sample_count isn't a boolean or pointer - comparing against 0
> would be clearer.

OK, will fix it in V7.

>> +        return -1;
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>>  static void calculate_dirtyrate(struct DirtyRateConfig config)
>>  {
>>      /* todo */
>> -- 
>> 1.8.3.1
> 
> dme.
>

Patch

diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index f4967fd..9cc2cbb 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -193,6 +193,69 @@  static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
     return 0;
 }
 
+static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
+{
+    uint32_t crc;
+    int i;
+
+    for (i = 0; i < info->sample_pages_count; i++) {
+        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
+        if (crc != info->hash_result[i]) {
+            info->sample_dirty_count++;
+        }
+    }
+}
+
+static struct RamblockDirtyInfo *
+find_page_matched(RAMBlock *block, int count,
+                  struct RamblockDirtyInfo *infos)
+{
+    int i;
+    struct RamblockDirtyInfo *matched;
+
+    for (i = 0; i < count; i++) {
+        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
+            break;
+        }
+    }
+
+    if (i == count) {
+        return NULL;
+    }
+
+    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
+        infos[i].ramblock_pages !=
+            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
+        return NULL;
+    }
+
+    matched = &infos[i];
+
+    return matched;
+}
+
+static int compare_page_hash_info(struct RamblockDirtyInfo *info,
+                                  int block_index)
+{
+    struct RamblockDirtyInfo *block_dinfo = NULL;
+    RAMBlock *block = NULL;
+
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
+        block_dinfo = find_page_matched(block, block_index + 1, info);
+        if (block_dinfo == NULL) {
+            continue;
+        }
+        calc_page_dirty_rate(block_dinfo);
+        update_dirtyrate_stat(block_dinfo);
+    }
+
+    if (!DirtyStat.total_sample_count) {
+        return -1;
+    }
+
+    return 0;
+}
+
 static void calculate_dirtyrate(struct DirtyRateConfig config)
 {
     /* todo */
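
calculate_dirtyrate() is still a /* todo */ stub at this point in the series and is
filled in by a later patch. Purely as orientation, a hedged sketch of how the helpers
introduced here could be wired together is shown below; the extra
record_ramblock_hash_info() parameters, the sample_period_seconds field and the final
rate computation are assumptions made for illustration, not taken from this patch.

    /* Illustrative sketch only -- not this patch's implementation. */
    static void calculate_dirtyrate_sketch(struct DirtyRateConfig config)
    {
        struct RamblockDirtyInfo *block_dinfo = NULL;
        int block_index = 0;

        /* 1. Hash each sampled page once (parameter list assumed). */
        if (record_ramblock_hash_info(&block_dinfo, config, &block_index) < 0) {
            return;
        }

        /* 2. Let the guest run for the sampling window (field name assumed). */
        sleep(config.sample_period_seconds);

        /* 3. Re-hash the same pages; a changed CRC marks the page as dirty. */
        if (compare_page_hash_info(block_dinfo, block_index) < 0) {
            return;
        }

        /*
         * 4. DirtyStat now holds the sample counts; the dirty page rate
         *    follows from dirty samples / total samples over the window.
         */
    }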