@@ -1352,6 +1352,10 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
              */
             if (kvm_state->kvm_dirty_ring_size) {
                 kvm_dirty_ring_reap_locked(kvm_state, NULL);
+                if (kvm_state->kvm_dirty_ring_with_bitmap) {
+                    kvm_slot_sync_dirty_pages(mem);
+                    kvm_slot_get_dirty_log(kvm_state, mem);
+                }
             } else {
                 kvm_slot_get_dirty_log(kvm_state, mem);
             }
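
When a slot with dirty logging enabled is dropped on this path, reaping the
rings only collects pages dirtied from vCPU context. With
KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, pages dirtied outside a vCPU (for example
by VM-scoped ioctls such as the arm64 ITS table save) are reported through the
per-slot bitmap instead, hence the extra sync plus KVM_GET_DIRTY_LOG fetch
before the slot goes away. For context, kvm_slot_get_dirty_log() elsewhere in
kvm-all.c boils down to that ioctl; a paraphrased sketch with the error
handling trimmed, not the verbatim helper:

    static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
    {
        struct kvm_dirty_log d = {};

        /* The kernel fills the slot's bitmap with the pages it tracked
         * outside the dirty ring (the backup bitmap case). */
        d.dirty_bitmap = slot->dirty_bmap;
        d.slot = slot->slot | (slot->as_id << 16);

        return kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == 0;
    }
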
@@ -1573,6 +1577,12 @@ static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
         mem = &kml->slots[i];
         if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
             kvm_slot_sync_dirty_pages(mem);
+
+            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
+                kvm_slot_get_dirty_log(s, mem)) {
+                kvm_slot_sync_dirty_pages(mem);
+            }
+
             /*
              * This is not needed by KVM_GET_DIRTY_LOG because the
              * ioctl will unconditionally overwrite the whole region.
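
In kvm_log_sync_global() the first kvm_slot_sync_dirty_pages() folds in the
bits already harvested from the rings for this sync; only on the final
stop-and-copy pass (last_stage) does KVM_GET_DIRTY_LOG overwrite the slot's
dirty_bmap with the backup bitmap, after which the second call folds those
bits in as well. Pages dirtied outside vCPU context only need to be
transferred once, so skipping the bitmap on every earlier iteration keeps the
fast path untouched. For context, a paraphrased sketch of the sync helper as
it reads elsewhere in kvm-all.c:

    static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
    {
        ram_addr_t start = slot->ram_start_offset;
        ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

        /* OR the slot-local bitmap into the global RAM dirty bitmap. */
        cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
    }
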
@@ -3701,6 +3711,7 @@ static void kvm_accel_instance_init(Object *obj)
     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
     /* KVM dirty ring is by default off */
     s->kvm_dirty_ring_size = 0;
+    s->kvm_dirty_ring_with_bitmap = false;
     s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
     s->notify_window = 0;
 }
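
The flag defaults to false here and is only expected to flip during dirty
ring setup, once the kernel advertises KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP. A
sketch of that negotiation, using the existing kvm_vm_check_extension() and
kvm_vm_enable_cap() helpers and assuming it runs in the ring-setup code where
s and ret are in scope; the exact placement is outside this excerpt:

    /* Sketch: request the backup bitmap when the kernel offers it. */
    if (kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
        if (ret) {
            error_report("Enabling dirty ring's backup bitmap failed: %s",
                         strerror(-ret));
            return -ret;
        }
        s->kvm_dirty_ring_with_bitmap = true;
    }

No new user-visible knob is involved: the bitmap backup rides on the existing
dirty ring enablement (e.g. -accel kvm,dirty-ring-size=4096) and is picked up
automatically whenever the capability is present.
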
@@ -115,6 +115,7 @@ struct KVMState
     } *as;
     uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
     uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
+    bool kvm_dirty_ring_with_bitmap; /* Whether KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP is enabled */
     struct KVMDirtyRingReaper reaper;
     NotifyVmexitOption notify_vmexit;
     uint32_t notify_window;