@@ -508,6 +508,7 @@ struct kvm {
struct kvm_vm_run *vm_run;
u32 dirty_ring_size;
struct kvm_dirty_ring vm_dirty_ring;
+ wait_queue_head_t dirty_ring_waitqueue;
};
#define kvm_err(fmt, ...) \
@@ -722,6 +722,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
INIT_LIST_HEAD(&kvm->devices);
+ init_waitqueue_head(&kvm->dirty_ring_waitqueue);
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
@@ -3370,16 +3371,23 @@ static void mark_page_dirty_in_ring(struct kvm *kvm,
is_vm_ring = true;
}
+retry:
ret = kvm_dirty_ring_push(ring, indexes,
(as_id << 16)|slot->id, offset,
is_vm_ring);
if (ret < 0) {
- if (is_vm_ring)
- pr_warn_once("vcpu %d dirty log overflow\n",
- vcpu->vcpu_id);
- else
- pr_warn_once("per-vm dirty log overflow\n");
- return;
+ /*
+ * The ring is full: put ourselves on the per-VM waitqueue and
+ * wait for the next KVM_RESET_DIRTY_RINGS before retrying.
+ */
+ wait_event_killable(kvm->dirty_ring_waitqueue,
+ !kvm_dirty_ring_full(ring));
+
+ /* If we're killed, there's no need to worry about losing dirty bits! */
+ if (fatal_signal_pending(current))
+ return;
+
+ goto retry;
}
if (ret)
@@ -3475,6 +3483,8 @@ static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
if (cleared)
kvm_flush_remote_tlbs(kvm);
+ wake_up_all(&kvm->dirty_ring_waitqueue);
+
return cleared;
}
When the dirty ring is completely full, we currently print a warning and drop the dirty bit. A better approach is to put the thread onto a waitqueue and retry after the next KVM_RESET_DIRTY_RINGS. We should still allow the process to be killed while waiting, so handle fatal signals explicitly. Signed-off-by: Peter Xu <peterx@redhat.com> --- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 22 ++++++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-)