diff mbox series

[v4,09/10] Add the function of colo_bitmap_clear_dirty.

Message ID 1616639091-28279-10-git-send-email-lei.rao@intel.com (mailing list archive)
State New, archived
Headers show
Series Fixed some bugs and optimized some codes for COLO | expand

Commit Message

Lei Rao March 25, 2021, 2:24 a.m. UTC
From: "Rao, Lei" <lei.rao@intel.com>

When we use continuous dirty memory copy for flushing ram cache on
secondary VM, we can also clean up the bitmap of contiguous dirty
page memory. This also can reduce the VM stop time during checkpoint.

Signed-off-by: Lei Rao <lei.rao@intel.com>
---
 migration/ram.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

Comments

Dr. David Alan Gilbert March 25, 2021, 6:07 p.m. UTC | #1
* leirao (lei.rao@intel.com) wrote:
> From: "Rao, Lei" <lei.rao@intel.com>
> 
> When we use continuous dirty memory copy for flushing ram cache on
> secondary VM, we can also clean up the bitmap of contiguous dirty
> page memory. This also can reduce the VM stop time during checkpoint.
> 
> Signed-off-by: Lei Rao <lei.rao@intel.com>
> ---
>  migration/ram.c | 29 +++++++++++++++++++++++++----
>  1 file changed, 25 insertions(+), 4 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index a258466..ae1e659 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -855,6 +855,30 @@ unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
>      return first;
>  }
>  
> +/**
> + * colo_bitmap_clear_dirty:when we flush ram cache to ram, we will use
> + * continuous memory copy, so we can also clean up the bitmap of contiguous
> + * dirty memory.
> + */
> +static inline bool colo_bitmap_clear_dirty(RAMState *rs,
> +                                           RAMBlock *rb,
> +                                           unsigned long start,
> +                                           unsigned long num)
> +{
> +    bool ret;
> +    unsigned long i = 0;
> +
> +    qemu_mutex_lock(&rs->bitmap_mutex);

Please use QEMU_LOCK_GUARD(&rs->bitmap_mutex);

> +    for (i = 0; i < num; i++) {
> +        ret = test_and_clear_bit(start + i, rb->bmap);
> +        if (ret) {
> +            rs->migration_dirty_pages--;
> +        }
> +    }
> +    qemu_mutex_unlock(&rs->bitmap_mutex);
> +    return ret;

This implementation is missing the clear_bmap code that
migration_bitmap_clear_dirty has.
I think that's necessary now.

Are we sure there's any benefit in this?

Dave

> +}
> +
>  static inline bool migration_bitmap_clear_dirty(RAMState *rs,
>                                                  RAMBlock *rb,
>                                                  unsigned long page)
> @@ -3700,7 +3724,6 @@ void colo_flush_ram_cache(void)
>      void *src_host;
>      unsigned long offset = 0;
>      unsigned long num = 0;
> -    unsigned long i = 0;
>  
>      memory_global_dirty_log_sync();
>      WITH_RCU_READ_LOCK_GUARD() {
> @@ -3722,9 +3745,7 @@ void colo_flush_ram_cache(void)
>                  num = 0;
>                  block = QLIST_NEXT_RCU(block, next);
>              } else {
> -                for (i = 0; i < num; i++) {
> -                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
> -                }
> +                colo_bitmap_clear_dirty(ram_state, block, offset, num);
>                  dst_host = block->host
>                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
>                  src_host = block->colo_cache
> -- 
> 1.8.3.1
>
Lei Rao March 26, 2021, 1:45 a.m. UTC | #2
-----Original Message-----
From: Dr. David Alan Gilbert <dgilbert@redhat.com> 
Sent: Friday, March 26, 2021 2:08 AM
To: Rao, Lei <lei.rao@intel.com>
Cc: Zhang, Chen <chen.zhang@intel.com>; lizhijian@cn.fujitsu.com; jasowang@redhat.com; quintela@redhat.com; pbonzini@redhat.com; lukasstraub2@web.de; qemu-devel@nongnu.org
Subject: Re: [PATCH v4 09/10] Add the function of colo_bitmap_clear_diry.

* leirao (lei.rao@intel.com) wrote:
> From: "Rao, Lei" <lei.rao@intel.com>
> 
> When we use continuous dirty memory copy for flushing ram cache on 
> secondary VM, we can also clean up the bitmap of contiguous dirty page 
> memory. This also can reduce the VM stop time during checkpoint.
> 
> Signed-off-by: Lei Rao <lei.rao@intel.com>
> ---
>  migration/ram.c | 29 +++++++++++++++++++++++++----
>  1 file changed, 25 insertions(+), 4 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c index a258466..ae1e659 
> 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -855,6 +855,30 @@ unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
>      return first;
>  }
>  
> +/**
> + * colo_bitmap_clear_dirty:when we flush ram cache to ram, we will 
> +use
> + * continuous memory copy, so we can also clean up the bitmap of 
> +contiguous
> + * dirty memory.
> + */
> +static inline bool colo_bitmap_clear_dirty(RAMState *rs,
> +                                           RAMBlock *rb,
> +                                           unsigned long start,
> +                                           unsigned long num) {
> +    bool ret;
> +    unsigned long i = 0;
> +
> +    qemu_mutex_lock(&rs->bitmap_mutex);

Please use QEMU_LOCK_GUARD(&rs->bitmap_mutex);

Will be changed in V5. Thanks.

> +    for (i = 0; i < num; i++) {
> +        ret = test_and_clear_bit(start + i, rb->bmap);
> +        if (ret) {
> +            rs->migration_dirty_pages--;
> +        }
> +    }
> +    qemu_mutex_unlock(&rs->bitmap_mutex);
> +    return ret;

This implementation is missing the clear_bmap code that migration_bitmap_clear_dirty has.
I think that's necessary now.

Are we sure there's any benefit in this?

Dave

There is such a note about clear_bmap in struct RAMBlock:
"On destination side, this should always be NULL, and the variable `clear_bmap_shift' is meaningless."
This means that clear_bmap is always NULL on secondary VM. And for the behavior of flush ram cache to ram, we will always only happen on secondary VM.
So, I think the clear_bmap code is unnecessary for COLO.
As for the benefits, When the number of dirty pages from flush ram cache to ram is too much. it will reduce the number of locks acquired.

Lei

> +}
> +
>  static inline bool migration_bitmap_clear_dirty(RAMState *rs,
>                                                  RAMBlock *rb,
>                                                  unsigned long page) 
> @@ -3700,7 +3724,6 @@ void colo_flush_ram_cache(void)
>      void *src_host;
>      unsigned long offset = 0;
>      unsigned long num = 0;
> -    unsigned long i = 0;
>  
>      memory_global_dirty_log_sync();
>      WITH_RCU_READ_LOCK_GUARD() {
> @@ -3722,9 +3745,7 @@ void colo_flush_ram_cache(void)
>                  num = 0;
>                  block = QLIST_NEXT_RCU(block, next);
>              } else {
> -                for (i = 0; i < num; i++) {
> -                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
> -                }
> +                colo_bitmap_clear_dirty(ram_state, block, offset, 
> + num);
>                  dst_host = block->host
>                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
>                  src_host = block->colo_cache
> --
> 1.8.3.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Dr. David Alan Gilbert March 29, 2021, 11:31 a.m. UTC | #3
* Rao, Lei (lei.rao@intel.com) wrote:
> 
> -----Original Message-----
> From: Dr. David Alan Gilbert <dgilbert@redhat.com> 
> Sent: Friday, March 26, 2021 2:08 AM
> To: Rao, Lei <lei.rao@intel.com>
> Cc: Zhang, Chen <chen.zhang@intel.com>; lizhijian@cn.fujitsu.com; jasowang@redhat.com; quintela@redhat.com; pbonzini@redhat.com; lukasstraub2@web.de; qemu-devel@nongnu.org
> Subject: Re: [PATCH v4 09/10] Add the function of colo_bitmap_clear_diry.
> 
> * leirao (lei.rao@intel.com) wrote:
> > From: "Rao, Lei" <lei.rao@intel.com>
> > 
> > When we use continuous dirty memory copy for flushing ram cache on 
> > secondary VM, we can also clean up the bitmap of contiguous dirty page 
> > memory. This also can reduce the VM stop time during checkpoint.
> > 
> > Signed-off-by: Lei Rao <lei.rao@intel.com>
> > ---
> >  migration/ram.c | 29 +++++++++++++++++++++++++----
> >  1 file changed, 25 insertions(+), 4 deletions(-)
> > 
> > diff --git a/migration/ram.c b/migration/ram.c index a258466..ae1e659 
> > 100644
> > --- a/migration/ram.c
> > +++ b/migration/ram.c
> > @@ -855,6 +855,30 @@ unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
> >      return first;
> >  }
> >  
> > +/**
> > + * colo_bitmap_clear_dirty:when we flush ram cache to ram, we will 
> > +use
> > + * continuous memory copy, so we can also clean up the bitmap of 
> > +contiguous
> > + * dirty memory.
> > + */
> > +static inline bool colo_bitmap_clear_dirty(RAMState *rs,
> > +                                           RAMBlock *rb,
> > +                                           unsigned long start,
> > +                                           unsigned long num) {
> > +    bool ret;
> > +    unsigned long i = 0;
> > +
> > +    qemu_mutex_lock(&rs->bitmap_mutex);
> 
> Please use QEMU_LOCK_GUARD(&rs->bitmap_mutex);
> 
> Will be changed in V5. Thanks.
> 
> > +    for (i = 0; i < num; i++) {
> > +        ret = test_and_clear_bit(start + i, rb->bmap);
> > +        if (ret) {
> > +            rs->migration_dirty_pages--;
> > +        }
> > +    }
> > +    qemu_mutex_unlock(&rs->bitmap_mutex);
> > +    return ret;
> 
> This implementation is missing the clear_bmap code that migration_bitmap_clear_dirty has.
> I think that's necessary now.
> 
> Are we sure there's any benefit in this?
> 
> Dave
> 
> There is such a note about clear_bmap in struct RAMBlock:
> "On destination side, this should always be NULL, and the variable `clear_bmap_shift' is meaningless."
> This means that clear_bmap is always NULL on secondary VM. And for the behavior of flush ram cache to ram, we will always only happen on secondary VM.
> So, I think the clear_bmap code is unnecessary for COLO.

Ah yes; can you add a comment there to note this is on the secondary
to make that clear.

> As for the benefits, When the number of dirty pages from flush ram cache to ram is too much. it will reduce the number of locks acquired.

It might be good to measure the benefit.

Dave

> Lei
> 
> > +}
> > +
> >  static inline bool migration_bitmap_clear_dirty(RAMState *rs,
> >                                                  RAMBlock *rb,
> >                                                  unsigned long page) 
> > @@ -3700,7 +3724,6 @@ void colo_flush_ram_cache(void)
> >      void *src_host;
> >      unsigned long offset = 0;
> >      unsigned long num = 0;
> > -    unsigned long i = 0;
> >  
> >      memory_global_dirty_log_sync();
> >      WITH_RCU_READ_LOCK_GUARD() {
> > @@ -3722,9 +3745,7 @@ void colo_flush_ram_cache(void)
> >                  num = 0;
> >                  block = QLIST_NEXT_RCU(block, next);
> >              } else {
> > -                for (i = 0; i < num; i++) {
> > -                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
> > -                }
> > +                colo_bitmap_clear_dirty(ram_state, block, offset, 
> > + num);
> >                  dst_host = block->host
> >                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
> >                  src_host = block->colo_cache
> > --
> > 1.8.3.1
> > 
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
>
Lei Rao April 9, 2021, 3:52 a.m. UTC | #4
The performance data has been added to the commit message in V6.

Thanks,
Lei.

-----Original Message-----
From: Dr. David Alan Gilbert <dgilbert@redhat.com> 
Sent: Monday, March 29, 2021 7:32 PM
To: Rao, Lei <lei.rao@intel.com>
Cc: Zhang, Chen <chen.zhang@intel.com>; lizhijian@cn.fujitsu.com; jasowang@redhat.com; quintela@redhat.com; pbonzini@redhat.com; lukasstraub2@web.de; qemu-devel@nongnu.org
Subject: Re: [PATCH v4 09/10] Add the function of colo_bitmap_clear_diry.

* Rao, Lei (lei.rao@intel.com) wrote:
> 
> -----Original Message-----
> From: Dr. David Alan Gilbert <dgilbert@redhat.com>
> Sent: Friday, March 26, 2021 2:08 AM
> To: Rao, Lei <lei.rao@intel.com>
> Cc: Zhang, Chen <chen.zhang@intel.com>; lizhijian@cn.fujitsu.com; 
> jasowang@redhat.com; quintela@redhat.com; pbonzini@redhat.com; 
> lukasstraub2@web.de; qemu-devel@nongnu.org
> Subject: Re: [PATCH v4 09/10] Add the function of colo_bitmap_clear_diry.
> 
> * leirao (lei.rao@intel.com) wrote:
> > From: "Rao, Lei" <lei.rao@intel.com>
> > 
> > When we use continuous dirty memory copy for flushing ram cache on 
> > secondary VM, we can also clean up the bitmap of contiguous dirty 
> > page memory. This also can reduce the VM stop time during checkpoint.
> > 
> > Signed-off-by: Lei Rao <lei.rao@intel.com>
> > ---
> >  migration/ram.c | 29 +++++++++++++++++++++++++----
> >  1 file changed, 25 insertions(+), 4 deletions(-)
> > 
> > diff --git a/migration/ram.c b/migration/ram.c index 
> > a258466..ae1e659
> > 100644
> > --- a/migration/ram.c
> > +++ b/migration/ram.c
> > @@ -855,6 +855,30 @@ unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
> >      return first;
> >  }
> >  
> > +/**
> > + * colo_bitmap_clear_dirty:when we flush ram cache to ram, we will 
> > +use
> > + * continuous memory copy, so we can also clean up the bitmap of 
> > +contiguous
> > + * dirty memory.
> > + */
> > +static inline bool colo_bitmap_clear_dirty(RAMState *rs,
> > +                                           RAMBlock *rb,
> > +                                           unsigned long start,
> > +                                           unsigned long num) {
> > +    bool ret;
> > +    unsigned long i = 0;
> > +
> > +    qemu_mutex_lock(&rs->bitmap_mutex);
> 
> Please use QEMU_LOCK_GUARD(&rs->bitmap_mutex);
> 
> Will be changed in V5. Thanks.
> 
> > +    for (i = 0; i < num; i++) {
> > +        ret = test_and_clear_bit(start + i, rb->bmap);
> > +        if (ret) {
> > +            rs->migration_dirty_pages--;
> > +        }
> > +    }
> > +    qemu_mutex_unlock(&rs->bitmap_mutex);
> > +    return ret;
> 
> This implementation is missing the clear_bmap code that migration_bitmap_clear_dirty has.
> I think that's necessary now.
> 
> Are we sure there's any benefit in this?
> 
> Dave
> 
> There is such a note about clear_bmap in struct RAMBlock:
> "On destination side, this should always be NULL, and the variable `clear_bmap_shift' is meaningless."
> This means that clear_bmap is always NULL on secondary VM. And for the behavior of flush ram cache to ram, we will always only happen on secondary VM.
> So, I think the clear_bmap code is unnecessary for COLO.

Ah yes; can you add a comment there to note this is on the secondary to make that clear.

> As for the benefits, When the number of dirty pages from flush ram cache to ram is too much. it will reduce the number of locks acquired.

It might be good to measure the benefit.

Dave

> Lei
> 
> > +}
> > +
> >  static inline bool migration_bitmap_clear_dirty(RAMState *rs,
> >                                                  RAMBlock *rb,
> >                                                  unsigned long page) 
> > @@ -3700,7 +3724,6 @@ void colo_flush_ram_cache(void)
> >      void *src_host;
> >      unsigned long offset = 0;
> >      unsigned long num = 0;
> > -    unsigned long i = 0;
> >  
> >      memory_global_dirty_log_sync();
> >      WITH_RCU_READ_LOCK_GUARD() {
> > @@ -3722,9 +3745,7 @@ void colo_flush_ram_cache(void)
> >                  num = 0;
> >                  block = QLIST_NEXT_RCU(block, next);
> >              } else {
> > -                for (i = 0; i < num; i++) {
> > -                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
> > -                }
> > +                colo_bitmap_clear_dirty(ram_state, block, offset, 
> > + num);
> >                  dst_host = block->host
> >                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
> >                  src_host = block->colo_cache
> > --
> > 1.8.3.1
> > 
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
diff mbox series

Patch

diff --git a/migration/ram.c b/migration/ram.c
index a258466..ae1e659 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -855,6 +855,33 @@  unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
     return first;
 }
 
+/**
+ * colo_bitmap_clear_dirty: when we flush ram cache to ram, we will use
+ * continuous memory copy, so we can also clean up the bitmap of contiguous
+ * dirty memory.
+ *
+ * This is only called on the secondary VM (COLO incoming side), where
+ * RAMBlock's clear_bmap is always NULL, so the clear_bmap handling done
+ * by migration_bitmap_clear_dirty is not needed here.
+ *
+ * Returns whether the last page in [start, start + num) was dirty,
+ * or false when num is 0.
+ */
+static inline bool colo_bitmap_clear_dirty(RAMState *rs,
+                                           RAMBlock *rb,
+                                           unsigned long start,
+                                           unsigned long num)
+{
+    bool ret = false;
+    unsigned long i;
+
+    /* Scoped lock guard: released on every exit path (review feedback). */
+    QEMU_LOCK_GUARD(&rs->bitmap_mutex);
+    for (i = 0; i < num; i++) {
+        ret = test_and_clear_bit(start + i, rb->bmap);
+        if (ret) {
+            rs->migration_dirty_pages--;
+        }
+    }
+    return ret;
+}
+
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                 RAMBlock *rb,
                                                 unsigned long page)
@@ -3700,7 +3724,6 @@  void colo_flush_ram_cache(void)
     void *src_host;
     unsigned long offset = 0;
     unsigned long num = 0;
-    unsigned long i = 0;
 
     memory_global_dirty_log_sync();
     WITH_RCU_READ_LOCK_GUARD() {
@@ -3722,9 +3745,7 @@  void colo_flush_ram_cache(void)
                 num = 0;
                 block = QLIST_NEXT_RCU(block, next);
             } else {
-                for (i = 0; i < num; i++) {
-                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
-                }
+                colo_bitmap_clear_dirty(ram_state, block, offset, num);
                 dst_host = block->host
                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                 src_host = block->colo_cache