Message ID | 20200224065414.36524-9-zhang.zhanghailiang@huawei.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Optimize VM's downtime while doing a checkpoint in COLO | expand |
* zhanghailiang (zhang.zhanghailiang@huawei.com) wrote: > After add migrating ram backgroud, we will call ram_load > for this process, but we should not flush ram cache during > this process. Move the flush action to the right place. > > Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> > --- > migration/colo.c | 1 + > migration/ram.c | 5 +---- > migration/ram.h | 1 + > 3 files changed, 3 insertions(+), 4 deletions(-) > > diff --git a/migration/colo.c b/migration/colo.c > index c36d94072f..18df8289f8 100644 > --- a/migration/colo.c > +++ b/migration/colo.c > @@ -799,6 +799,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, > > qemu_mutex_lock_iothread(); > vmstate_loading = true; > + colo_flush_ram_cache(); > ret = qemu_load_device_state(fb); > if (ret < 0) { > error_setg(errp, "COLO: load device state failed"); > diff --git a/migration/ram.c b/migration/ram.c > index 1b3f423351..7bc841d14f 100644 > --- a/migration/ram.c > +++ b/migration/ram.c > @@ -3305,7 +3305,7 @@ static bool postcopy_is_running(void) > * Flush content of RAM cache into SVM's memory. > * Only flush the pages that be dirtied by PVM or SVM or both. 
> */ > -static void colo_flush_ram_cache(void) > +void colo_flush_ram_cache(void) > { > RAMBlock *block = NULL; > void *dst_host; > @@ -3576,9 +3576,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) > } > trace_ram_load_complete(ret, seq_iter); > > - if (!ret && migration_incoming_in_colo_state()) { > - colo_flush_ram_cache(); > - } > return ret; > } > > diff --git a/migration/ram.h b/migration/ram.h > index 5ceaff7cb4..ae14341482 100644 > --- a/migration/ram.h > +++ b/migration/ram.h > @@ -67,5 +67,6 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); > int colo_init_ram_cache(void); > void colo_release_ram_cache(void); > void colo_incoming_start_dirty_log(void); > +void colo_flush_ram_cache(void); > > #endif > -- > 2.21.0 > > -- Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
diff --git a/migration/colo.c b/migration/colo.c index c36d94072f..18df8289f8 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -799,6 +799,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, qemu_mutex_lock_iothread(); vmstate_loading = true; + colo_flush_ram_cache(); ret = qemu_load_device_state(fb); if (ret < 0) { error_setg(errp, "COLO: load device state failed"); diff --git a/migration/ram.c b/migration/ram.c index 1b3f423351..7bc841d14f 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -3305,7 +3305,7 @@ static bool postcopy_is_running(void) * Flush content of RAM cache into SVM's memory. * Only flush the pages that be dirtied by PVM or SVM or both. */ -static void colo_flush_ram_cache(void) +void colo_flush_ram_cache(void) { RAMBlock *block = NULL; void *dst_host; @@ -3576,9 +3576,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) } trace_ram_load_complete(ret, seq_iter); - if (!ret && migration_incoming_in_colo_state()) { - colo_flush_ram_cache(); - } return ret; } diff --git a/migration/ram.h b/migration/ram.h index 5ceaff7cb4..ae14341482 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -67,5 +67,6 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); int colo_init_ram_cache(void); void colo_release_ram_cache(void); void colo_incoming_start_dirty_log(void); +void colo_flush_ram_cache(void); #endif
After adding background RAM migration, we will call ram_load for this process, but we should not flush the RAM cache during this process. Move the flush action to the right place. Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com> --- migration/colo.c | 1 + migration/ram.c | 5 +---- migration/ram.h | 1 + 3 files changed, 3 insertions(+), 4 deletions(-)