@@ -389,6 +389,9 @@ struct kvm_vcpu {
 	 */
 	struct kvm_memory_slot *last_used_slot;
 	u64 last_used_slot_gen;
+
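+	/* Set once this vCPU has marked one extra adjacent gfn dirty. */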
+	bool extra_dirty;
 };
 
 /*
@@ -3444,6 +3444,12 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 		u32 slot = (memslot->as_id << 16) | memslot->id;
 
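+		/* Mark the adjacent gfn dirty too, at most once per vCPU. */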
+		if (vcpu && !vcpu->extra_dirty &&
+		    gfn_to_memslot(kvm, gfn + 1) == memslot) {
+			vcpu->extra_dirty = true;
+			mark_page_dirty_in_slot(kvm, memslot, gfn + 1);
+		}
 		if (kvm->dirty_ring_size && vcpu)
 			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
 		else if (memslot->dirty_bitmap)