diff --git a/migration/savevm.h b/migration/savevm.h
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -39,8 +39,7 @@ void qemu_savevm_state_header(QEMUFile *f);
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
void qemu_savevm_state_cleanup(void);
void qemu_savevm_state_complete_postcopy(QEMUFile *f);
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
- bool inactivate_disks);
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only);
void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
uint64_t *can_postcopy);
void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
@@ -68,6 +67,6 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis);
int qemu_load_device_state(QEMUFile *f);
int qemu_loadvm_approve_switchover(void);
int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
- bool in_postcopy, bool inactivate_disks);
+ bool in_postcopy);
#endif
diff --git a/migration/migration.c b/migration/migration.c
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2555,7 +2555,7 @@ static int postcopy_start(MigrationState *ms, Error **errp)
* Cause any non-postcopiable, but iterative devices to
* send out their final data.
*/
- qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
+ qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
/*
* in Finish migrate and with the io-lock held everything should
@@ -2600,7 +2600,7 @@ static int postcopy_start(MigrationState *ms, Error **errp)
*/
qemu_savevm_send_postcopy_listen(fb);
- qemu_savevm_state_complete_precopy(fb, false, false);
+ qemu_savevm_state_complete_precopy(fb, false);
if (migrate_postcopy_ram()) {
qemu_savevm_send_ping(fb, 3);
}
@@ -2732,11 +2732,21 @@ static int migration_completion_precopy(MigrationState *s,
goto out_unlock;
}
+ /* Inactivate disks except in COLO */
+ if (!migrate_colo()) {
+ /*
+ * Inactivate before sending QEMU_VM_EOF so that the
+ * bdrv_activate_all() on the other end won't fail.
+ */
+ if (!migration_block_inactivate()) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ }
+
migration_rate_set(RATE_LIMIT_DISABLED);
- /* Inactivate disks except in COLO */
- ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
- !migrate_colo());
+ ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false);
out_unlock:
bql_unlock();
return ret;
@@ -3617,7 +3627,7 @@ static void *bg_migration_thread(void *opaque)
* save their state to channel-buffer along with devices.
*/
cpu_synchronize_all_states();
- if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
+ if (qemu_savevm_state_complete_precopy_non_iterable(fb, false)) {
goto fail;
}
/*
diff --git a/migration/savevm.c b/migration/savevm.c
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1521,8 +1521,7 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
}
int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
- bool in_postcopy,
- bool inactivate_disks)
+ bool in_postcopy)
{
MigrationState *ms = migrate_get_current();
int64_t start_ts_each, end_ts_each;
@@ -1553,20 +1552,6 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
end_ts_each - start_ts_each);
}
- if (inactivate_disks) {
- /*
- * Inactivate before sending QEMU_VM_EOF so that the
- * bdrv_activate_all() on the other end won't fail.
- */
- if (!migration_block_inactivate()) {
- error_setg(&local_err, "%s: bdrv_inactivate_all() failed",
- __func__);
- migrate_set_error(ms, local_err);
- error_report_err(local_err);
- qemu_file_set_error(f, -EFAULT);
- return ret;
- }
- }
if (!in_postcopy) {
/* Postcopy stream will still be going */
qemu_put_byte(f, QEMU_VM_EOF);
@@ -1587,8 +1572,7 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
return 0;
}
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
- bool inactivate_disks)
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
{
int ret;
Error *local_err = NULL;
@@ -1613,8 +1597,7 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
goto flush;
}
- ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
- inactivate_disks);
+ ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy);
if (ret) {
return ret;
}
@@ -1717,7 +1700,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
ret = qemu_file_get_error(f);
if (ret == 0) {
- qemu_savevm_state_complete_precopy(f, false, false);
+ qemu_savevm_state_complete_precopy(f, false);
ret = qemu_file_get_error(f);
}
if (ret != 0) {
@@ -1743,7 +1726,7 @@ cleanup:
void qemu_savevm_live_state(QEMUFile *f)
{
/* save QEMU_VM_SECTION_END section */
- qemu_savevm_state_complete_precopy(f, true, false);
+ qemu_savevm_state_complete_precopy(f, true);
qemu_put_byte(f, QEMU_VM_EOF);
}
The inactivate_disks parameter is only used by one caller, which is the genuine precopy completion path (migration_completion_precopy).

The parameter was introduced in a1fbe750fd ("migration: Fix race of image locking between src and dst") to make sure the inactivation happens before sending EOF, so that the destination will always be able to activate the disks properly. However, there is no limitation on how early we inactivate the disks: for the precopy completion path, we can always do that as long as the VM is stopped.

Move the disk inactivation there, then we can remove the inactivate_disks parameter from the whole call stack, because all the remaining users always pass in false.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/savevm.h    |  5 ++---
 migration/migration.c | 22 ++++++++++++++++------
 migration/savevm.c    | 27 +++++----------------------
 3 files changed, 23 insertions(+), 31 deletions(-)
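For context, a minimal standalone sketch of the resulting ordering in the precopy completion path. The helpers sketch_block_inactivate() and sketch_complete_precopy() are illustrative stubs standing in for migration_block_inactivate() and qemu_savevm_state_complete_precopy(); this is not QEMU code, only a compilable model of the call order after this patch (inactivate disks before the final device state and QEMU_VM_EOF, except under COLO):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for migration_block_inactivate(); false on failure. */
static bool sketch_block_inactivate(void)
{
    printf("inactivate block devices (before QEMU_VM_EOF)\n");
    return true;
}

/* Stub standing in for qemu_savevm_state_complete_precopy(f, false). */
static int sketch_complete_precopy(void)
{
    printf("send final device state + QEMU_VM_EOF\n");
    return 0;
}

/*
 * Models migration_completion_precopy() after the change: the caller
 * inactivates the disks itself (skipped for COLO) before completing
 * precopy, so the inactivation still happens before QEMU_VM_EOF and
 * the destination's bdrv_activate_all() cannot race with the source.
 */
static int sketch_completion_precopy(bool is_colo)
{
    if (!is_colo) {
        if (!sketch_block_inactivate()) {
            return -EFAULT;
        }
    }
    return sketch_complete_precopy();
}

int main(void)
{
    /* Normal (non-COLO) precopy completion. */
    return sketch_completion_precopy(false) ? 1 : 0;
}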