@@ -892,6 +892,8 @@ struct kvm_arch {
u64 master_cycle_now;
struct delayed_work kvmclock_update_work;
struct delayed_work kvmclock_sync_work;
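+ /* Deferred collapsible-SPTE zap; zap_in_progress is set until it completes. */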
+ struct delayed_work kvm_mmu_zap_collapsible_sptes_work;
+ bool zap_in_progress;
struct kvm_xen_hvm_config xen_hvm_config;
@@ -1247,6 +1249,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
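+/* Deferred work callback that zaps collapsible SPTEs in all memslots. */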
+void zap_collapsible_sptes_fn(struct work_struct *work);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -5679,14 +5679,41 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
return need_tlb_flush;
}
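+/*
+ * Deferred work callback: zap collapsible SPTEs in every memslot of every
+ * address space, dropping mmu_lock when a reschedule is needed.
+ */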
+void zap_collapsible_sptes_fn(struct work_struct *work)
+{
+ struct kvm_memory_slot *memslot;
+ struct kvm_memslots *slots;
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
+ kvm_mmu_zap_collapsible_sptes_work);
+ struct kvm *kvm = container_of(ka, struct kvm, arch);
+ int i;
+
+ mutex_lock(&kvm->slots_lock);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ spin_lock(&kvm->mmu_lock);
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(memslot, slots) {
+ slot_handle_leaf(kvm, memslot,
+ kvm_mmu_zap_collapsible_spte, true);
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ spin_unlock(&kvm->mmu_lock);
+ }
+ ka->zap_in_progress = false;
+ mutex_unlock(&kvm->slots_lock);
+}
+
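+/* Delay, in jiffies, before the deferred collapsible-SPTE zap runs (60s). */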
+#define KVM_MMU_ZAP_DELAYED (60 * HZ)
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
- /* FIXME: const-ify all uses of struct kvm_memory_slot. */
- spin_lock(&kvm->mmu_lock);
- slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
- kvm_mmu_zap_collapsible_spte, true);
- spin_unlock(&kvm->mmu_lock);
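+ /*
+ * Defer the zap to a workqueue instead of scanning the slot here.
+ * zap_in_progress keeps at most one instance of the work pending;
+ * zap_collapsible_sptes_fn() clears it once the deferred zap
+ * completes.
+ */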
+ if (!kvm->arch.zap_in_progress) {
+ kvm->arch.zap_in_progress = true;
+ schedule_delayed_work(&kvm->arch.kvm_mmu_zap_collapsible_sptes_work,
+ KVM_MMU_ZAP_DELAYED);
+ }
}
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
@@ -9019,6 +9019,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
+ INIT_DELAYED_WORK(&kvm->arch.kvm_mmu_zap_collapsible_sptes_work,
+ zap_collapsible_sptes_fn);
+ kvm->arch.zap_in_progress = false;
kvm_hv_init_vm(kvm);
kvm_page_track_init(kvm);
@@ -9064,6 +9067,7 @@ void kvm_arch_sync_events(struct kvm *kvm)
{
cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
+ cancel_delayed_work_sync(&kvm->arch.kvm_mmu_zap_collapsible_sptes_work);
kvm_free_pit(kvm);
}