| Field | Value |
|---|---|
| Message ID | 20170315135021.6978-3-quintela@redhat.com (mailing list archive) |
| State | New, archived |
* Juan Quintela (quintela@redhat.com) wrote:
> We need to add a parameter to several functions to make this work.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/ram.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/migration/ram.c b/migration/ram.c
> index c20a539..9120755 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -45,8 +45,6 @@
>  #include "qemu/rcu_queue.h"
>  #include "migration/colo.h"
>
> -static int dirty_rate_high_cnt;
> -
>  static uint64_t bitmap_sync_count;
>
>  /***********************************************************/
> @@ -148,6 +146,8 @@ struct RAMState {
>      uint32_t last_version;
>      /* We are in the first round */
>      bool ram_bulk_stage;
> +    /* How many times we have dirty too many pages */
> +    int dirty_rate_high_cnt;
>  };
>  typedef struct RAMState RAMState;
>
> @@ -626,7 +626,7 @@ uint64_t ram_pagesize_summary(void)
>      return summary;
>  }
>
> -static void migration_bitmap_sync(void)
> +static void migration_bitmap_sync(RAMState *rs)
>  {
>      RAMBlock *block;
>      uint64_t num_dirty_pages_init = migration_dirty_pages;
> @@ -673,9 +673,9 @@ static void migration_bitmap_sync(void)
>          if (s->dirty_pages_rate &&
>             (num_dirty_pages_period * TARGET_PAGE_SIZE >
>                 (bytes_xfer_now - bytes_xfer_prev)/2) &&
> -           (dirty_rate_high_cnt++ >= 2)) {
> +           (rs->dirty_rate_high_cnt++ >= 2)) {
>              trace_migration_throttle();
> -            dirty_rate_high_cnt = 0;
> +            rs->dirty_rate_high_cnt = 0;
>              mig_throttle_guest_down();
>          }
>          bytes_xfer_prev = bytes_xfer_now;
> @@ -1859,7 +1859,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
>      rcu_read_lock();
>
>      /* This should be our last sync, the src is now paused */
> -    migration_bitmap_sync();
> +    migration_bitmap_sync(&ram_state);
>
>      unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
>      if (!unsentmap) {
> @@ -1935,7 +1935,7 @@ static int ram_save_init_globals(RAMState *rs)
>  {
>      int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
>
> -    dirty_rate_high_cnt = 0;
> +    rs->dirty_rate_high_cnt = 0;
>      bitmap_sync_count = 0;
>      migration_bitmap_sync_init();
>      qemu_mutex_init(&migration_bitmap_mutex);
> @@ -1999,7 +1999,7 @@ static int ram_save_init_globals(RAMState *rs)
>      migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
>
>      memory_global_dirty_log_start();
> -    migration_bitmap_sync();
> +    migration_bitmap_sync(rs);
>      qemu_mutex_unlock_ramlist();
>      qemu_mutex_unlock_iothread();
>      rcu_read_unlock();
> @@ -2117,11 +2117,11 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>  static int ram_save_complete(QEMUFile *f, void *opaque)
>  {
>      RAMState *rs = opaque;
> -
> +

Is that undoing false spaces from the previous patch?

anyway,
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

>      rcu_read_lock();
>
>      if (!migration_in_postcopy(migrate_get_current())) {
> -        migration_bitmap_sync();
> +        migration_bitmap_sync(rs);
>      }
>
>      ram_control_before_iterate(f, RAM_CONTROL_FINISH);
> @@ -2154,6 +2154,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
>                               uint64_t *non_postcopiable_pending,
>                               uint64_t *postcopiable_pending)
>  {
> +    RAMState *rs = opaque;
>      uint64_t remaining_size;
>
>      remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
> @@ -2162,7 +2163,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
>          remaining_size < max_size) {
>          qemu_mutex_lock_iothread();
>          rcu_read_lock();
> -        migration_bitmap_sync();
> +        migration_bitmap_sync(rs);
>          rcu_read_unlock();
>          qemu_mutex_unlock_iothread();
>          remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
> --
> 2.9.3
>

--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On 03/16/2017 09:20 AM, Dr. David Alan Gilbert wrote:
> * Juan Quintela (quintela@redhat.com) wrote:
>> We need to add a parameter to several functions to make this work.
>>
>> Signed-off-by: Juan Quintela <quintela@redhat.com>
>> ---
>>  migration/ram.c | 23 ++++++++++++-----------
>>  1 file changed, 12 insertions(+), 11 deletions(-)
>>
>> diff --git a/migration/ram.c b/migration/ram.c
>> index c20a539..9120755 100644
>> --- a/migration/ram.c
>> +++ b/migration/ram.c
>> @@ -45,8 +45,6 @@
>>  #include "qemu/rcu_queue.h"
>>  #include "migration/colo.h"
>>
>> -static int dirty_rate_high_cnt;
>> -
>>  static uint64_t bitmap_sync_count;
>>
>>  /***********************************************************/
>> @@ -148,6 +146,8 @@ struct RAMState {
>>      uint32_t last_version;
>>      /* We are in the first round */
>>      bool ram_bulk_stage;
>> +    /* How many times we have dirty too many pages */
>> +    int dirty_rate_high_cnt;
>>  };
>>  typedef struct RAMState RAMState;
>>
>> @@ -626,7 +626,7 @@ uint64_t ram_pagesize_summary(void)
>>      return summary;
>>  }
>>
>> -static void migration_bitmap_sync(void)
>> +static void migration_bitmap_sync(RAMState *rs)
>>  {
>>      RAMBlock *block;
>>      uint64_t num_dirty_pages_init = migration_dirty_pages;
>> @@ -673,9 +673,9 @@ static void migration_bitmap_sync(void)
>>          if (s->dirty_pages_rate &&
>>             (num_dirty_pages_period * TARGET_PAGE_SIZE >
>>                 (bytes_xfer_now - bytes_xfer_prev)/2) &&
>> -           (dirty_rate_high_cnt++ >= 2)) {
>> +           (rs->dirty_rate_high_cnt++ >= 2)) {
>>              trace_migration_throttle();
>> -            dirty_rate_high_cnt = 0;
>> +            rs->dirty_rate_high_cnt = 0;
>>              mig_throttle_guest_down();
>>          }
>>          bytes_xfer_prev = bytes_xfer_now;
>> @@ -1859,7 +1859,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
>>      rcu_read_lock();
>>
>>      /* This should be our last sync, the src is now paused */
>> -    migration_bitmap_sync();
>> +    migration_bitmap_sync(&ram_state);
>>
>>      unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
>>      if (!unsentmap) {
>> @@ -1935,7 +1935,7 @@ static int ram_save_init_globals(RAMState *rs)
>>  {
>>      int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
>>
>> -    dirty_rate_high_cnt = 0;
>> +    rs->dirty_rate_high_cnt = 0;
>>      bitmap_sync_count = 0;
>>      migration_bitmap_sync_init();
>>      qemu_mutex_init(&migration_bitmap_mutex);
>> @@ -1999,7 +1999,7 @@ static int ram_save_init_globals(RAMState *rs)
>>      migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
>>
>>      memory_global_dirty_log_start();
>> -    migration_bitmap_sync();
>> +    migration_bitmap_sync(rs);
>>      qemu_mutex_unlock_ramlist();
>>      qemu_mutex_unlock_iothread();
>>      rcu_read_unlock();
>> @@ -2117,11 +2117,11 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>>  static int ram_save_complete(QEMUFile *f, void *opaque)
>>  {
>>      RAMState *rs = opaque;
>> -
>> +
>
> Is that undoing false spaces from the previous patch?
>
> anyway,
> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>

>>      rcu_read_lock();
>>
>>      if (!migration_in_postcopy(migrate_get_current())) {
>> -        migration_bitmap_sync();
>> +        migration_bitmap_sync(rs);
>>      }
>>
>>      ram_control_before_iterate(f, RAM_CONTROL_FINISH);
>> @@ -2154,6 +2154,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
>>                               uint64_t *non_postcopiable_pending,
>>                               uint64_t *postcopiable_pending)
>>  {
>> +    RAMState *rs = opaque;
>>      uint64_t remaining_size;
>>
>>      remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
>> @@ -2162,7 +2163,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
>>          remaining_size < max_size) {
>>          qemu_mutex_lock_iothread();
>>          rcu_read_lock();
>> -        migration_bitmap_sync();
>> +        migration_bitmap_sync(rs);
>>          rcu_read_unlock();
>>          qemu_mutex_unlock_iothread();
>>          remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
>> --
>> 2.9.3
>>
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
>
"Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote: > * Juan Quintela (quintela@redhat.com) wrote: >> We need to add a parameter to several functions to make this work. >> >> Signed-off-by: Juan Quintela <quintela@redhat.com> [...] > Is that undoing false spaces from the previous patch? Yes O:-) > > anyway, > Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Thanks.
diff --git a/migration/ram.c b/migration/ram.c
index c20a539..9120755 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -45,8 +45,6 @@
 #include "qemu/rcu_queue.h"
 #include "migration/colo.h"
 
-static int dirty_rate_high_cnt;
-
 static uint64_t bitmap_sync_count;
 
 /***********************************************************/
@@ -148,6 +146,8 @@ struct RAMState {
     uint32_t last_version;
     /* We are in the first round */
     bool ram_bulk_stage;
+    /* How many times we have dirty too many pages */
+    int dirty_rate_high_cnt;
 };
 typedef struct RAMState RAMState;
 
@@ -626,7 +626,7 @@ uint64_t ram_pagesize_summary(void)
     return summary;
 }
 
-static void migration_bitmap_sync(void)
+static void migration_bitmap_sync(RAMState *rs)
 {
     RAMBlock *block;
     uint64_t num_dirty_pages_init = migration_dirty_pages;
@@ -673,9 +673,9 @@ static void migration_bitmap_sync(void)
         if (s->dirty_pages_rate &&
            (num_dirty_pages_period * TARGET_PAGE_SIZE >
                (bytes_xfer_now - bytes_xfer_prev)/2) &&
-           (dirty_rate_high_cnt++ >= 2)) {
+           (rs->dirty_rate_high_cnt++ >= 2)) {
             trace_migration_throttle();
-            dirty_rate_high_cnt = 0;
+            rs->dirty_rate_high_cnt = 0;
             mig_throttle_guest_down();
         }
         bytes_xfer_prev = bytes_xfer_now;
@@ -1859,7 +1859,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     rcu_read_lock();
 
     /* This should be our last sync, the src is now paused */
-    migration_bitmap_sync();
+    migration_bitmap_sync(&ram_state);
 
     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
     if (!unsentmap) {
@@ -1935,7 +1935,7 @@ static int ram_save_init_globals(RAMState *rs)
 {
     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
 
-    dirty_rate_high_cnt = 0;
+    rs->dirty_rate_high_cnt = 0;
     bitmap_sync_count = 0;
     migration_bitmap_sync_init();
     qemu_mutex_init(&migration_bitmap_mutex);
@@ -1999,7 +1999,7 @@ static int ram_save_init_globals(RAMState *rs)
     migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
 
     memory_global_dirty_log_start();
-    migration_bitmap_sync();
+    migration_bitmap_sync(rs);
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
     rcu_read_unlock();
@@ -2117,11 +2117,11 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 static int ram_save_complete(QEMUFile *f, void *opaque)
 {
     RAMState *rs = opaque;
-
+
     rcu_read_lock();
 
     if (!migration_in_postcopy(migrate_get_current())) {
-        migration_bitmap_sync();
+        migration_bitmap_sync(rs);
     }
 
     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -2154,6 +2154,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                              uint64_t *non_postcopiable_pending,
                              uint64_t *postcopiable_pending)
 {
+    RAMState *rs = opaque;
     uint64_t remaining_size;
 
     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
@@ -2162,7 +2163,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
         remaining_size < max_size) {
         qemu_mutex_lock_iothread();
         rcu_read_lock();
-        migration_bitmap_sync();
+        migration_bitmap_sync(rs);
         rcu_read_unlock();
         qemu_mutex_unlock_iothread();
         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
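The hunk in migration_bitmap_sync() is the only place dirty_rate_high_cnt is used, so the throttling heuristic it moves into RAMState can be read on its own: if the guest dirtied more memory in the last sync period than half of what migration managed to send, and that has already been the case on two earlier syncs, the guest is throttled and the counter resets. Below is a minimal, self-contained sketch of just that logic; the DemoState struct, maybe_throttle() and throttle_guest_down() names are invented for illustration, and the s->dirty_pages_rate guard and timing bookkeeping of the real function are omitted.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096

/* Illustrative stand-in for the relevant RAMState field. */
typedef struct {
    int dirty_rate_high_cnt;   /* consecutive "dirtying too fast" periods */
} DemoState;

/* Pretend knob: in QEMU this is mig_throttle_guest_down(). */
static void throttle_guest_down(void)
{
    printf("throttling guest CPU to slow down dirtying\n");
}

/*
 * Returns true if throttling was triggered for this sync period.
 * dirty_pages: pages dirtied since the last bitmap sync.
 * bytes_sent:  migration bytes transferred over the same period.
 */
static bool maybe_throttle(DemoState *s, uint64_t dirty_pages,
                           uint64_t bytes_sent)
{
    /* Guest dirties memory faster than half of what we can send... */
    if (dirty_pages * TARGET_PAGE_SIZE > bytes_sent / 2 &&
        /* ...and it already looked that way on two earlier syncs. */
        s->dirty_rate_high_cnt++ >= 2) {
        s->dirty_rate_high_cnt = 0;
        throttle_guest_down();
        return true;
    }
    return false;
}

int main(void)
{
    DemoState s = { .dirty_rate_high_cnt = 0 };

    /* Three consecutive "too dirty" periods are needed to trigger. */
    for (int i = 0; i < 3; i++) {
        bool throttled = maybe_throttle(&s, 100000, 64 * 1024 * 1024);
        printf("sync %d: throttled=%d cnt=%d\n",
               i, throttled, s.dirty_rate_high_cnt);
    }
    return 0;
}
```

Compiled and run as-is, only the third call reports throttling, which matches the post-incremented `>= 2` test in the patched condition.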
We need to add a parameter to several functions to make this work.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
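The "add a parameter to several functions" in the commit message is the mechanical half of the pattern this series follows: a file-scope global in migration/ram.c is folded into RAMState, and the state is then passed explicitly, with opaque-callback entry points such as ram_save_complete() recovering it from their opaque pointer. The sketch below is a compilable miniature of that pattern under stated assumptions; everything suffixed *_demo/*Demo is invented for the example, and only the dirty_rate_high_cnt field and the opaque-pointer convention mirror the patch.

```c
#include <stdio.h>

/* Before: a file-scope global made the migration state implicit.
 *
 *     static int dirty_rate_high_cnt;
 *     static void migration_bitmap_sync(void) { dirty_rate_high_cnt++; }
 *
 * After: the counter lives in a state struct, and every caller names
 * the state instance it operates on.
 */
typedef struct RAMStateDemo {
    /* How many times we have dirtied too many pages. */
    int dirty_rate_high_cnt;
} RAMStateDemo;

/* Single instance for now, mirroring ram_state in migration/ram.c. */
static RAMStateDemo ram_state_demo;

static void bitmap_sync_demo(RAMStateDemo *rs)
{
    rs->dirty_rate_high_cnt++;
}

/* Callbacks that only receive an opaque pointer recover the state from
 * it, the way ram_save_complete()/ram_save_pending() do with 'opaque'. */
static void save_complete_demo(void *opaque)
{
    RAMStateDemo *rs = opaque;
    bitmap_sync_demo(rs);
}

int main(void)
{
    save_complete_demo(&ram_state_demo);
    bitmap_sync_demo(&ram_state_demo);
    printf("dirty_rate_high_cnt = %d\n", ram_state_demo.dirty_rate_high_cnt);
    return 0;
}
```

The payoff of the explicit parameter is that the function no longer depends on hidden globals, which is what lets later patches in the series collect the remaining migration globals into the same struct.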