@@ -61,11 +61,12 @@ typedef struct SaveVMHandlers {
* pending data.
*/
/* This estimates the remaining data to transfer */
- void (*state_pending_estimate)(void *opaque, uint64_t *must_precopy,
+ void (*state_pending_estimate)(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy);
/* This calculate the exact remaining data to transfer */
- void (*state_pending_exact)(void *opaque, uint64_t *must_precopy,
- uint64_t *can_postcopy);
+ void (*state_pending_exact)(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy, uint64_t *can_postcopy);
LoadStateHandler *load_state;
int (*load_setup)(QEMUFile *f, void *opaque);
int (*load_cleanup)(void *opaque);
@@ -40,9 +40,11 @@ void qemu_savevm_state_cleanup(void);
void qemu_savevm_state_complete_postcopy(QEMUFile *f);
int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
bool inactivate_disks);
-void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
+void qemu_savevm_state_pending_exact(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy);
-void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
+void qemu_savevm_state_pending_estimate(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy);
void qemu_savevm_send_ping(QEMUFile *f, uint32_t value);
void qemu_savevm_send_open_return_path(QEMUFile *f);
@@ -182,8 +182,8 @@ static int cmma_save_setup(QEMUFile *f, void *opaque)
return 0;
}
-static void cmma_state_pending(void *opaque, uint64_t *must_precopy,
- uint64_t *can_postcopy)
+static void cmma_state_pending(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy, uint64_t *can_postcopy)
{
S390StAttribState *sas = S390_STATTRIB(opaque);
S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas);
@@ -314,7 +314,8 @@ static void vfio_save_cleanup(void *opaque)
* repeatedly while pending RAM size is over the threshold, thus migration
* can't converge and querying the VFIO device pending data size is useless.
*/
-static void vfio_state_pending_exact(void *opaque, uint64_t *must_precopy,
+static void vfio_state_pending_exact(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
VFIODevice *vbasedev = opaque;
@@ -762,7 +762,7 @@ static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
return 0;
}
-static void dirty_bitmap_state_pending(void *opaque,
+static void dirty_bitmap_state_pending(void *opaque, uint64_t threshold_size,
uint64_t *must_precopy,
uint64_t *can_postcopy)
{
@@ -853,8 +853,8 @@ static int block_save_complete(QEMUFile *f, void *opaque)
return 0;
}
-static void block_state_pending(void *opaque, uint64_t *must_precopy,
- uint64_t *can_postcopy)
+static void block_state_pending(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy, uint64_t *can_postcopy)
{
/* Estimate pending number of bytes to send */
uint64_t pending;
@@ -3866,15 +3866,19 @@ static MigIterateState migration_iteration_run(MigrationState *s)
uint64_t must_precopy, can_postcopy;
bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
- qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
+ qemu_savevm_state_pending_estimate(s->threshold_size, &must_precopy,
+ &can_postcopy);
uint64_t pending_size = must_precopy + can_postcopy;
- trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
+ trace_migrate_pending_estimate(pending_size, s->threshold_size,
+ must_precopy, can_postcopy);
if (must_precopy <= s->threshold_size) {
- qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
+ qemu_savevm_state_pending_exact(s->threshold_size, &must_precopy,
+ &can_postcopy);
pending_size = must_precopy + can_postcopy;
- trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
+ trace_migrate_pending_exact(pending_size, s->threshold_size,
+ must_precopy, can_postcopy);
}
if (!pending_size || pending_size < s->threshold_size) {
@@ -3489,7 +3489,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
return 0;
}
-static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
+static void ram_state_pending_estimate(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
RAMState **temp = opaque;
@@ -3505,7 +3506,8 @@ static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
}
}
-static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
+static void ram_state_pending_exact(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
RAMState **temp = opaque;
@@ -1541,7 +1541,8 @@ flush:
* the result is split into the amount for units that can and
* for units that can't do postcopy.
*/
-void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
+void qemu_savevm_state_pending_estimate(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
SaveStateEntry *se;
@@ -1558,11 +1559,13 @@ void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
continue;
}
}
- se->ops->state_pending_estimate(se->opaque, must_precopy, can_postcopy);
+ se->ops->state_pending_estimate(se->opaque, threshold_size,
+ must_precopy, can_postcopy);
}
}
-void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
+void qemu_savevm_state_pending_exact(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
SaveStateEntry *se;
@@ -1579,7 +1582,8 @@ void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
continue;
}
}
- se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy);
+ se->ops->state_pending_exact(se->opaque, threshold_size, must_precopy,
+ can_postcopy);
}
}
@@ -150,8 +150,8 @@ migrate_fd_cleanup(void) ""
migrate_fd_error(const char *error_desc) "error=%s"
migrate_fd_cancel(void) ""
migrate_handle_rp_req_pages(const char *rbname, size_t start, size_t len) "in %s at 0x%zx len 0x%zx"
-migrate_pending_exact(uint64_t size, uint64_t pre, uint64_t post) "exact pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
-migrate_pending_estimate(uint64_t size, uint64_t pre, uint64_t post) "estimate pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
+migrate_pending_exact(uint64_t size, uint64_t threshold_size, uint64_t pre, uint64_t post) "exact pending size %" PRIu64 " threshold size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
+migrate_pending_estimate(uint64_t size, uint64_t threshold_size, uint64_t pre, uint64_t post) "estimate pending size %" PRIu64 " threshold size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
migrate_send_rp_message(int msg_type, uint16_t len) "%d: len %d"
migrate_send_rp_recv_bitmap(char *name, int64_t size) "block '%s' size 0x%"PRIi64
migration_completion_file_err(void) ""
Pass threshold_size to .state_pending_{estimate,exact}(). This parameter will be used in the following patch by VFIO migration to force the complete transmission of all VFIO pre-copy initial bytes prior to moving to the stop-copy phase, which can reduce migration downtime. Signed-off-by: Avihai Horon <avihaih@nvidia.com> --- include/migration/register.h | 7 ++++--- migration/savevm.h | 6 ++++-- hw/s390x/s390-stattrib.c | 4 ++-- hw/vfio/migration.c | 3 ++- migration/block-dirty-bitmap.c | 2 +- migration/block.c | 4 ++-- migration/migration.c | 12 ++++++++---- migration/ram.c | 6 ++++-- migration/savevm.c | 12 ++++++++---- migration/trace-events | 4 ++-- 10 files changed, 37 insertions(+), 23 deletions(-)