@@ -47,6 +47,7 @@ config KVM
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
select INTERVAL_TREE
+ select HAVE_KVM_DIRTY_QUOTA
select HAVE_KVM_PM_NOTIFIER if PM
select KVM_GENERIC_HARDWARE_ENABLING
help
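The symbol selected above is not defined anywhere in this excerpt; the series presumably declares it alongside the other HAVE_KVM_* capability symbols in virt/kvm/Kconfig. A minimal sketch of that assumed declaration, in the same style as HAVE_KVM_PM_NOTIFIER:

config HAVE_KVM_DIRTY_QUOTA
	bool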
@@ -3401,8 +3401,12 @@ static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
if (!try_cmpxchg64(sptep, &old_spte, new_spte))
return false;
- if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
+ if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
+ struct kvm_mmu_page *sp = sptep_to_sp(sptep);
+
+ update_dirty_quota(vcpu->kvm, (1L << SPTE_LEVEL_SHIFT(sp->role.level)));
mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
+ }
return true;
}
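update_dirty_quota() itself is introduced outside this excerpt, so the sketch below is only an assumption about its shape, not the series' actual implementation (in particular, the dirty_quota and pages_dirtied fields on struct kvm_run are made-up names). The idea the call sites rely on: charge the size of the mapping that was just made writable, converted to base pages, against a per-vCPU quota, and raise KVM_REQ_DIRTY_QUOTA_EXIT once the quota is used up. (1L << SPTE_LEVEL_SHIFT(level)) is 4 KiB, 2 MiB or 1 GiB for levels 1, 2 and 3, so write access to a huge mapping is presumably accounted as every base page it spans.

/*
 * Hypothetical sketch only; the real helper and its kvm_run fields may
 * differ from what is shown here.
 */
static inline void update_dirty_quota(struct kvm *kvm, unsigned long page_size_bytes)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	/* Guest pages can also be dirtied outside of vcpu context. */
	if (!vcpu || vcpu->kvm != kvm)
		return;

	vcpu->run->pages_dirtied += page_size_bytes >> PAGE_SHIFT;

	/* Treat a quota of 0 as "unthrottled". */
	if (vcpu->run->dirty_quota &&
	    vcpu->run->pages_dirtied >= vcpu->run->dirty_quota)
		kvm_make_request(KVM_REQ_DIRTY_QUOTA_EXIT, vcpu);
}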
@@ -243,6 +243,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
/* Enforced by kvm_mmu_hugepage_adjust. */
WARN_ON(level > PG_LEVEL_4K);
+ update_dirty_quota(vcpu->kvm, (1L << SPTE_LEVEL_SHIFT(level)));
mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
}
@@ -5834,6 +5834,9 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
*/
if (__xfer_to_guest_mode_work_pending())
return 1;
+
+ if (kvm_test_request(KVM_REQ_DIRTY_QUOTA_EXIT, vcpu))
+ return 1;
}
return 1;
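The check added here deliberately uses kvm_test_request() rather than kvm_check_request(): the emulation loop only needs to notice that a dirty-quota exit is pending and break out, leaving the request set so that the vcpu_enter_guest() hunk further down can consume it and fill in the exit reason. Roughly, the existing helpers in include/linux/kvm_host.h behave as paraphrased below (see the header for the exact bodies):

/* Non-destructive: only peeks at the pending-request bitmap. */
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

/* Destructive: clears the bit, so only one place gets to act on it. */
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (!kvm_test_request(req, vcpu))
		return false;

	kvm_clear_request(req, vcpu);
	smp_mb__after_atomic();	/* paired with the barrier in kvm_make_request() */
	return true;
}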
@@ -3125,6 +3125,7 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
guest_hv_clock->version = ++vcpu->hv_clock.version;
+ update_dirty_quota(v->kvm, PAGE_SIZE);
mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
read_unlock_irqrestore(&gpc->lock, flags);
@@ -3599,6 +3600,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
out:
user_access_end();
dirty:
+ update_dirty_quota(vcpu->kvm, PAGE_SIZE);
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
@@ -4878,6 +4880,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+ update_dirty_quota(vcpu->kvm, PAGE_SIZE);
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
@@ -10608,7 +10611,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = 0;
goto out;
}
+ if (kvm_check_request(KVM_REQ_DIRTY_QUOTA_EXIT, vcpu)) {
+ vcpu->run->exit_reason = KVM_EXIT_DIRTY_QUOTA_EXHAUSTED;
+ r = 0;
+ goto out;
+ }

/*
* KVM_REQ_HV_STIMER has to be processed after
* KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
@@ -435,9 +435,12 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
read_unlock_irqrestore(&gpc1->lock, flags);
+ update_dirty_quota(v->kvm, PAGE_SIZE);
mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
- if (user_len2)
+ if (user_len2) {
+ update_dirty_quota(v->kvm, PAGE_SIZE);
mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
+ }
}
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -549,6 +552,7 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
if (v->arch.xen.upcall_vector)
kvm_xen_inject_vcpu_vector(v);
+ update_dirty_quota(v->kvm, PAGE_SIZE);
mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
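For completeness, a rough idea of how a VMM run loop could consume the new exit. Only KVM_EXIT_DIRTY_QUOTA_EXHAUSTED comes from this series; the dirty_quota/pages_dirtied fields in struct kvm_run and the "top up the quota and re-enter" policy are assumptions standing in for the uapi half of the series, which is not part of this excerpt:

/* Hypothetical userspace sketch; field names are assumed, not final uapi. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_run_once(int vcpu_fd, struct kvm_run *run, __u64 quota_pages)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_DIRTY_QUOTA_EXHAUSTED) {
		/*
		 * The vCPU has dirtied its allotted pages.  Throttle it here
		 * if migration can't keep up, then grant the next slice
		 * before calling KVM_RUN again.
		 */
		run->dirty_quota = run->pages_dirtied + quota_pages;
	}
	return 0;
}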