@@ -3622,11 +3622,7 @@ static void *bg_migration_thread(void *opaque)
if (migration_stop_vm(s, RUN_STATE_PAUSED)) {
goto fail;
}
- /*
- * Put vCPUs in sync with shadow context structures, then
- * save their state to channel-buffer along with devices.
- */
- cpu_synchronize_all_states();
+
if (qemu_savevm_state_complete_precopy_non_iterable(fb, false)) {
goto fail;
}
diff --git a/migration/savevm.c b/migration/savevm.c
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1531,6 +1531,9 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
int ret;
+ /* Making sure cpu states are synchronized before saving non-iterable */
+ cpu_synchronize_all_states();
+
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (se->vmsd && se->vmsd->early_setup) {
/* Already saved during qemu_savevm_state_setup(). */
@@ -1584,8 +1587,6 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)

trace_savevm_state_complete_precopy();

- cpu_synchronize_all_states();
-
if (!in_postcopy || iterable_only) {
ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy);
if (ret) {
Do a one-shot cpu sync at qemu_savevm_state_complete_precopy_non_iterable(),
instead of coding it separately in two places.

Note that in the context of qemu_savevm_state_complete_precopy(), this
patch is also an optimization for the postcopy path, in that we can avoid
syncing the CPUs twice during switchover: before this patch, postcopy_start()
invokes qemu_savevm_state_complete_precopy() twice, and each invocation
tries to sync the CPU info.  In reality, only one of them is enough.

For background snapshot, there is no intended functional change.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/migration.c | 6 +-----
 migration/savevm.c    | 5 +++--
 2 files changed, 4 insertions(+), 7 deletions(-)
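
A side note for reviewers: below is a small standalone toy model of the call
pattern described above (plain C, NOT QEMU code; every *_stub name is invented
for illustration).  It only mimics the commit-message reasoning: postcopy
switchover still completes precopy twice, but with the sync living in the
shared non-iterable path it runs only once.

/*
 * Standalone toy model, NOT QEMU code: every *_stub name below is
 * invented for illustration of the call pattern only.
 */
#include <stdbool.h>
#include <stdio.h>

static int sync_count;

/* Stands in for cpu_synchronize_all_states(). */
static void cpu_sync_stub(void)
{
    sync_count++;
}

/*
 * Stands in for qemu_savevm_state_complete_precopy_non_iterable():
 * after the patch, this is the single place that syncs the vCPUs.
 */
static int complete_non_iterable_stub(void)
{
    cpu_sync_stub();
    /* ... save non-iterable device state ... */
    return 0;
}

/*
 * Stands in for qemu_savevm_state_complete_precopy(): no per-call
 * cpu sync here any more.
 */
static int complete_precopy_stub(bool iterable_only)
{
    /* ... flush iterable state (e.g. RAM) ... */
    if (iterable_only) {
        return 0;
    }
    return complete_non_iterable_stub();
}

int main(void)
{
    /* Postcopy switchover still completes precopy twice ... */
    complete_precopy_stub(true);    /* iterable-only pass */
    complete_precopy_stub(false);   /* device-state pass */
    /* ... but the vCPUs get synced only once now. */
    printf("vCPU syncs during switchover: %d\n", sync_count);
    return 0;
}

Running the model prints one sync for the two switchover calls, matching the
"only one of them is enough" observation.  The background snapshot thread
reaches the same single sync point through
qemu_savevm_state_complete_precopy_non_iterable(), which is why its local
comment and sync call can be dropped.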