diff mbox

[5/8] migration: move calling control_save_page to the common place

Message ID 20180313075739.11194-6-xiaoguangrong@tencent.com (mailing list archive)
State New, archived
Headers show

Commit Message

Xiao Guangrong March 13, 2018, 7:57 a.m. UTC
From: Xiao Guangrong <xiaoguangrong@tencent.com>

The function is called by both ram_save_page and ram_save_target_page,
so move it to the common caller to clean up the code

Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
---
 migration/ram.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

Comments

Dr. David Alan Gilbert March 15, 2018, 11:47 a.m. UTC | #1
* guangrong.xiao@gmail.com (guangrong.xiao@gmail.com) wrote:
> From: Xiao Guangrong <xiaoguangrong@tencent.com>
> 
> The function is called by both ram_save_page and ram_save_target_page,
> so move it to the common caller to clean up the code
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
> ---
>  migration/ram.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index e7b8b14c3c..839665d866 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1020,10 +1020,6 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
>      p = block->host + offset;
>      trace_ram_save_page(block->idstr, (uint64_t)offset, p);
>  
> -    if (control_save_page(rs, block, offset, &pages)) {
> -        return pages;
> -    }
> -
>      XBZRLE_cache_lock();
>      pages = save_zero_page(rs, block, offset);
>      if (pages > 0) {
> @@ -1176,10 +1172,6 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
>  
>      p = block->host + offset;
>  
> -    if (control_save_page(rs, block, offset, &pages)) {
> -        return pages;
> -    }
> -
>      /* When starting the process of a new block, the first page of
>       * the block should be sent out before other pages in the same
>       * block, and all the pages in last block should have been sent
> @@ -1472,6 +1464,13 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
>  
>      /* Check the pages is dirty and if it is send it */
>      if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
> +        RAMBlock *block = pss->block;
> +        ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
> +
> +        if (control_save_page(rs, block, offset, &res)) {
> +            goto page_saved;

OK, but I'd prefer if you avoided this forward goto;  we do use goto but
we tend to keep it just for error cases.

Dave

> +        }
> +
>          /*
>           * If xbzrle is on, stop using the data compression after first
>           * round of migration even if compression is enabled. In theory,
> @@ -1484,6 +1483,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
>              res = ram_save_page(rs, pss, last_stage);
>          }
>  
> +page_saved:
>          if (res < 0) {
>              return res;
>          }
> -- 
> 2.14.3
> 
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Xiao Guangrong March 16, 2018, 8:59 a.m. UTC | #2
On 03/15/2018 07:47 PM, Dr. David Alan Gilbert wrote:

>>       /* Check the pages is dirty and if it is send it */
>>       if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
>> +        RAMBlock *block = pss->block;
>> +        ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
>> +
>> +        if (control_save_page(rs, block, offset, &res)) {
>> +            goto page_saved;
> 
> OK, but I'd prefer if you avoided this forward goto;  we do use goto but
> we tend to keep it just for error cases.
> 

There is a common operation, clearing unsentmap, for save_control,
save_zero, save_compressed and save_normal; if we do not use 'goto',
the operation would have to be duplicated several times, or we would
have a big if...else if...else if... section.

So it may not be too bad to have 'goto' in this case? :)
Dr. David Alan Gilbert March 19, 2018, 1:15 p.m. UTC | #3
* Xiao Guangrong (guangrong.xiao@gmail.com) wrote:
> 
> 
> On 03/15/2018 07:47 PM, Dr. David Alan Gilbert wrote:
> 
> > >       /* Check the pages is dirty and if it is send it */
> > >       if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
> > > +        RAMBlock *block = pss->block;
> > > +        ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
> > > +
> > > +        if (control_save_page(rs, block, offset, &res)) {
> > > +            goto page_saved;
> > 
> > OK, but I'd prefer if you avoided this forward goto;  we do use goto but
> > we tend to keep it just for error cases.
> > 
> 
> There is a common operation, clearing unsentmap, for save_control,
> save_zero, save_compressed and save_normal; if we do not use 'goto',
> the operation would have to be duplicated several times, or we would
> have a big if...else if...else if... section.
> 
> So it may not be too bad to have 'goto' in this case? :)

The problem is that it always tends to creep a bit, and then you soon
have a knot of gotos.

I suggest you add a 'page_saved' bool, set it instead of taking the
goto, and then add an if (!page_saved) around the next section.
It doesn't need to nest for the last section; you just do another
if (!page_saved) around that.

Dave

--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Peter Xu March 27, 2018, 12:35 p.m. UTC | #4
On Tue, Mar 13, 2018 at 03:57:36PM +0800, guangrong.xiao@gmail.com wrote:
> From: Xiao Guangrong <xiaoguangrong@tencent.com>
> 
> The function is called by both ram_save_page and ram_save_target_page,
> so move it to the common caller to clean up the code
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>

Reviewed-by: Peter Xu <peterx@redhat.com>
diff mbox

Patch

diff --git a/migration/ram.c b/migration/ram.c
index e7b8b14c3c..839665d866 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1020,10 +1020,6 @@  static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     p = block->host + offset;
     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
-    if (control_save_page(rs, block, offset, &pages)) {
-        return pages;
-    }
-
     XBZRLE_cache_lock();
     pages = save_zero_page(rs, block, offset);
     if (pages > 0) {
@@ -1176,10 +1172,6 @@  static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
 
     p = block->host + offset;
 
-    if (control_save_page(rs, block, offset, &pages)) {
-        return pages;
-    }
-
     /* When starting the process of a new block, the first page of
      * the block should be sent out before other pages in the same
      * block, and all the pages in last block should have been sent
@@ -1472,6 +1464,13 @@  static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
 
     /* Check the pages is dirty and if it is send it */
     if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+        RAMBlock *block = pss->block;
+        ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
+
+        if (control_save_page(rs, block, offset, &res)) {
+            goto page_saved;
+        }
+
         /*
          * If xbzrle is on, stop using the data compression after first
          * round of migration even if compression is enabled. In theory,
@@ -1484,6 +1483,7 @@  static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
             res = ram_save_page(rs, pss, last_stage);
         }
 
+page_saved:
         if (res < 0) {
             return res;
         }