@@ -1554,7 +1554,7 @@ static void kvm_log_sync(MemoryListener *listener,
kvm_slots_unlock();
}
-static void kvm_log_sync_global(MemoryListener *l)
+static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
KVMState *s = kvm_state;
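Why the KVM backend wants the flag: with the dirty-ring backend, writes that do not come from a running vCPU (for instance, in-kernel device emulation storing state into guest memory) are tracked in a per-slot backup bitmap rather than in any vCPU's ring, and that bitmap can only be collected meaningfully once every vCPU has stopped. That rationale comes from the dirty-ring design rather than from this hunk; the sketch below is a self-contained model of the resulting pattern, not QEMU code (collect_backup_bitmap and sync_slot are hypothetical names): drain the ring on every pass, read the one-shot backup source only when last_stage is true.

    #include <stdbool.h>
    #include <stdio.h>

    static bool backup_consumed;

    /* Stand-in for a one-shot source such as a slot's backup dirty
     * bitmap (hypothetical; read-and-clear semantics). */
    static unsigned collect_backup_bitmap(void)
    {
        if (backup_consumed) {
            return 0;               /* already drained by an earlier call */
        }
        backup_consumed = true;
        return 0xf0;                /* pages dirtied outside any vCPU */
    }

    static unsigned sync_slot(bool last_stage)
    {
        unsigned dirty = 0x0f;      /* pages drained from the dirty ring */
        if (last_stage) {
            dirty |= collect_backup_bitmap();
        }
        return dirty;
    }

    int main(void)
    {
        printf("iteration:  %#x\n", sync_slot(false));  /* ring only */
        printf("last stage: %#x\n", sync_slot(true));   /* ring + backup */
        return 0;
    }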
@@ -929,8 +929,11 @@ struct MemoryListener {
* its @log_sync must be NULL. Vice versa.
*
* @listener: The #MemoryListener.
+ * @last_stage: whether this is the last stage of log synchronization during
+ * migration. The caller should guarantee that a sync with @last_stage set
+ * to true is triggered exactly once, after all VCPUs have been stopped.
*/
- void (*log_sync_global)(MemoryListener *listener);
+ void (*log_sync_global)(MemoryListener *listener, bool last_stage);
/**
* @log_clear:
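Read from the hook's side, the contract above means last_stage=false may arrive any number of times while vCPUs still run, and last_stage=true arrives exactly once after they have all stopped. A minimal, self-contained sketch of a hook honoring that contract, under that assumption (Listener and example_sync are hypothetical, not QEMU types):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Listener Listener;
    struct Listener {
        void (*log_sync_global)(Listener *l, bool last_stage);
        bool final_sync_done;
    };

    static void example_sync(Listener *l, bool last_stage)
    {
        if (last_stage) {
            /* per the contract: at most one final sync, vCPUs stopped */
            assert(!l->final_sync_done);
            l->final_sync_done = true;
            printf("final global sync\n");
        } else {
            printf("iterative global sync\n");
        }
    }

    int main(void)
    {
        Listener l = { .log_sync_global = example_sync };
        for (int round = 0; round < 3; round++) {
            l.log_sync_global(&l, false);   /* while the guest runs */
        }
        /* ...all vCPUs stopped here... */
        l.log_sync_global(&l, true);        /* triggered exactly once */
        return 0;
    }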
@@ -2408,7 +2411,9 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
 *
 * Synchronizes the dirty page log for all address spaces.
+ *
+ * @last_stage: whether this is the last stage of live migration
 */
-void memory_global_dirty_log_sync(void);
+void memory_global_dirty_log_sync(bool last_stage);
/**
 * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
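At the call sites updated below, the change is almost entirely mechanical: every existing caller keeps its behaviour by passing false, and only the migration completion path (ram_save_complete() via migration_bitmap_sync_precopy()) passes true. A stub makes the calling pattern runnable in isolation; the static function here merely stands in for the real QEMU symbol:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub for the QEMU symbol; the real function forwards the flag
     * to every registered memory listener. */
    static void memory_global_dirty_log_sync(bool last_stage)
    {
        printf("global dirty log sync, last_stage=%d\n", last_stage);
    }

    int main(void)
    {
        memory_global_dirty_log_sync(false); /* dirty rate, COLO, iterations */
        memory_global_dirty_log_sync(true);  /* ram_save_complete() only */
        return 0;
    }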
@@ -101,7 +101,7 @@ void global_dirty_log_change(unsigned int flag, bool start)
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
qemu_mutex_lock_iothread();
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
if (one_shot) {
memory_global_dirty_log_stop(flag);
}
@@ -553,7 +553,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
* skip it unconditionally and start dirty tracking
 * from the second round of log sync
*/
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
/*
* reset page protect manually and unconditionally.
@@ -1217,7 +1217,7 @@ static void migration_trigger_throttle(RAMState *rs)
}
}
-static void migration_bitmap_sync(RAMState *rs)
+static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
RAMBlock *block;
int64_t end_time;
@@ -1229,7 +1229,7 @@ static void migration_bitmap_sync(RAMState *rs)
}
trace_migration_bitmap_sync_start();
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(last_stage);
qemu_mutex_lock(&rs->bitmap_mutex);
WITH_RCU_READ_LOCK_GUARD() {
@@ -1263,7 +1263,7 @@ static void migration_bitmap_sync(RAMState *rs)
}
}
-static void migration_bitmap_sync_precopy(RAMState *rs)
+static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
Error *local_err = NULL;
@@ -1276,7 +1276,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs)
local_err = NULL;
}
- migration_bitmap_sync(rs);
+ migration_bitmap_sync(rs, last_stage);
if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
error_report_err(local_err);
@@ -2937,7 +2937,7 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
RCU_READ_LOCK_GUARD();
/* This should be our last sync, the src is now paused */
- migration_bitmap_sync(rs);
+ migration_bitmap_sync(rs, false);
/* Easiest way to make sure we don't resume in the middle of a host-page */
rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
@@ -3128,7 +3128,7 @@ static void ram_init_bitmaps(RAMState *rs)
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
- migration_bitmap_sync_precopy(rs);
+ migration_bitmap_sync_precopy(rs, false);
}
}
qemu_mutex_unlock_ramlist();
@@ -3446,7 +3446,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
WITH_RCU_READ_LOCK_GUARD() {
if (!migration_in_postcopy()) {
- migration_bitmap_sync_precopy(rs);
+ migration_bitmap_sync_precopy(rs, true);
}
ram_control_before_iterate(f, RAM_CONTROL_FINISH);
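This is the single call site that passes true, and it leans on an ordering guarantee from the migration core: by the time ram_save_complete() runs, the completion path has already stopped the VM (in upstream QEMU via vm_stop_force_state(), which is outside this diff), so the "all VCPUs stopped" precondition holds. A compact model of that ordering, with hypothetical helper names:

    #include <assert.h>
    #include <stdbool.h>

    static bool vcpus_stopped;

    static void bitmap_sync(bool last_stage)
    {
        /* the final sync is only legal once every vCPU is stopped */
        assert(!last_stage || vcpus_stopped);
    }

    static void migration_iterate(void) { bitmap_sync(false); }
    static void stop_all_vcpus(void)    { vcpus_stopped = true; }
    static void save_complete(void)     { bitmap_sync(true); }

    int main(void)
    {
        migration_iterate();   /* guest still running */
        stop_all_vcpus();      /* completion path stops the VM first */
        save_complete();       /* ...then the one last_stage sync */
        return 0;
    }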
@@ -3516,7 +3516,7 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
if (!migration_in_postcopy()) {
qemu_mutex_lock_iothread();
WITH_RCU_READ_LOCK_GUARD() {
- migration_bitmap_sync_precopy(rs);
+ migration_bitmap_sync_precopy(rs, false);
}
qemu_mutex_unlock_iothread();
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
@@ -3926,7 +3926,7 @@ void colo_incoming_start_dirty_log(void)
qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist();
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
ramblock_sync_dirty_bitmap(ram_state, block);
@@ -4217,7 +4217,7 @@ void colo_flush_ram_cache(void)
void *src_host;
unsigned long offset = 0;
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
ramblock_sync_dirty_bitmap(ram_state, block);
@@ -2224,7 +2224,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
* If memory region `mr' is NULL, do global sync. Otherwise, sync
* dirty bitmap for the specified memory region.
*/
-static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
+static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
{
MemoryListener *listener;
AddressSpace *as;
@@ -2254,7 +2254,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
 * is to do a global sync, because we are not able to sync
 * at a finer granularity.
*/
- listener->log_sync_global(listener);
+ listener->log_sync_global(listener, last_stage);
trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
}
}
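The dispatch above relies on the exclusivity rule quoted in the MemoryListener hunk (@log_sync and @log_sync_global are mutually exclusive), which is why only the global hook needs the new parameter: the fine-grained per-section hook is never asked about migration stages. A minimal model of that dispatch, all names hypothetical:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Listener Listener;
    struct Listener {
        /* at most one of these is non-NULL (mutually exclusive) */
        void (*log_sync)(Listener *l);                   /* fine-grained */
        void (*log_sync_global)(Listener *l, bool last_stage);
    };

    static void dispatch(Listener *l, bool last_stage)
    {
        if (l->log_sync_global) {
            /* no finer granularity available: always a global sync */
            l->log_sync_global(l, last_stage);
        } else if (l->log_sync) {
            l->log_sync(l);   /* per-section; last_stage not applicable */
        }
    }

    static void global_hook(Listener *l, bool last_stage)
    {
        (void)l;
        printf("global sync, last_stage=%d\n", last_stage);
    }

    int main(void)
    {
        Listener l = { .log_sync = NULL, .log_sync_global = global_hook };
        dispatch(&l, false);
        dispatch(&l, true);
        return 0;
    }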
@@ -2318,7 +2318,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
{
DirtyBitmapSnapshot *snapshot;
assert(mr->ram_block);
- memory_region_sync_dirty_bitmap(mr);
+ memory_region_sync_dirty_bitmap(mr, false);
snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
memory_global_after_dirty_log_sync();
return snapshot;
@@ -2844,9 +2844,9 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr)
return mr && mr != container;
}
-void memory_global_dirty_log_sync(void)
+void memory_global_dirty_log_sync(bool last_stage)
{
- memory_region_sync_dirty_bitmap(NULL);
+ memory_region_sync_dirty_bitmap(NULL, last_stage);
}
void memory_global_after_dirty_log_sync(void)