@@ -3230,7 +3230,10 @@ static bool mmu_load_shadow_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 			break;
 
 		if (is_last_spte(spte, sp->role.level)) {
-			flush |= spte_write_protect(sptep, false);
+			if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+				flush |= spte_clear_dirty(sptep);
+			else
+				flush |= spte_write_protect(sptep, false);
 			continue;
 		}
 
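This hunk is the core of the change: for a 4K leaf SPTE (PT_PAGE_TABLE_LEVEL) only the dirty bit is cleared, so the guest keeps write access and dirtying is observed through the bit being set again, while a large-page leaf is still write protected so the next guest write faults and can be handled at finer granularity. Below is a standalone model (not kernel code) of the two transitions it chooses between; the bit positions and model_* names are illustrative assumptions, not the real SPTE layout:

/*
 * Standalone model of the two SPTE transitions in the hunk above.
 * Bit positions are hypothetical, not the real EPT/shadow layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_WRITABLE (1ULL << 1)	/* hypothetical writable bit */
#define SPTE_DIRTY    (1ULL << 9)	/* hypothetical dirty bit */

/* 4K leaf: clear only the dirty bit; the guest keeps write access. */
static bool model_clear_dirty(uint64_t *sptep)
{
	bool was_dirty = *sptep & SPTE_DIRTY;

	*sptep &= ~SPTE_DIRTY;
	return was_dirty;		/* caller flushes TLBs if set */
}

/* Large page: drop write access so the next guest write faults. */
static bool model_write_protect(uint64_t *sptep)
{
	bool was_writable = *sptep & SPTE_WRITABLE;

	*sptep &= ~SPTE_WRITABLE;
	return was_writable;
}

int main(void)
{
	uint64_t spte = SPTE_WRITABLE | SPTE_DIRTY;

	printf("clear_dirty -> flush=%d spte=%#llx\n",
	       model_clear_dirty(&spte), (unsigned long long)spte);
	printf("write_protect -> flush=%d spte=%#llx\n",
	       model_write_protect(&spte), (unsigned long long)spte);
	return 0;
}

In both cases the returned flag plays the role of the "flush" accumulator in the real loop: a TLB flush is only needed when a bit that guards guest behavior actually changed.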
@@ -6106,6 +6109,7 @@ void kvm_mmu_write_protect_all_pages(struct kvm *kvm, bool write_protect)
 	kvm_reload_remote_mmus(kvm);
 	spin_unlock(&kvm->mmu_lock);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_write_protect_all_pages);
 
 static unsigned long
 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
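The export is what lets the vmx.c hunk below call this function: the MMU code is built into kvm.ko while the VMX code lands in the separate kvm-intel.ko module, so without EXPORT_SYMBOL_GPL the symbol would be unresolved at module load time. The caller also needs a prototype in scope; a minimal sketch, assuming it lives in a shared KVM MMU header (the exact header is not shown in this excerpt):

/* Assumed declaration; its actual placement is an assumption. */
void kvm_mmu_write_protect_all_pages(struct kvm *kvm, bool write_protect);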
@@ -7180,14 +7180,13 @@ static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_slot_enable_log_dirty(struct kvm *kvm,
 				      struct kvm_memory_slot *slot)
 {
-	kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
-	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
+	kvm_mmu_write_protect_all_pages(kvm, true);
 }
 
 static void vmx_slot_disable_log_dirty(struct kvm *kvm,
 				       struct kvm_memory_slot *slot)
 {
-	kvm_mmu_slot_set_dirty(kvm, slot);
+	kvm_mmu_write_protect_all_pages(kvm, false);
 }
 
 static void vmx_flush_log_dirty(struct kvm *kvm)
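The net effect of this hunk is that enabling or disabling dirty logging on a memslot no longer goes through the per-slot helpers (kvm_mmu_slot_leaf_clear_dirty, kvm_mmu_slot_largepage_remove_write_access, kvm_mmu_slot_set_dirty); it flips a VM-global write-protect-all mode instead, and the slot argument goes unused. A minimal standalone model of that behavioral change, with hypothetical model_* names standing in for the real callbacks:

/*
 * Hypothetical model: per-slot dirty-log setup becomes one VM-global
 * toggle, mirroring kvm_mmu_write_protect_all_pages(kvm, on/off).
 */
#include <stdbool.h>
#include <stdio.h>

struct model_kvm {
	bool write_protect_all;	/* stands in for the VM-global state
				 * the real function updates */
};

static void model_write_protect_all(struct model_kvm *kvm, bool on)
{
	/* The real function also reloads remote MMUs under mmu_lock,
	 * as the second hunk shows; omitted in this model. */
	kvm->write_protect_all = on;
}

/* The memslot parameter of the real callbacks is now ignored. */
static void model_slot_enable_log_dirty(struct model_kvm *kvm)
{
	model_write_protect_all(kvm, true);
}

static void model_slot_disable_log_dirty(struct model_kvm *kvm)
{
	model_write_protect_all(kvm, false);
}

int main(void)
{
	struct model_kvm kvm = { 0 };

	model_slot_enable_log_dirty(&kvm);
	printf("enabled:  write_protect_all=%d\n", kvm.write_protect_all);
	model_slot_disable_log_dirty(&kvm);
	printf("disabled: write_protect_all=%d\n", kvm.write_protect_all);
	return 0;
}

The design trade-off this models: a single global toggle avoids walking every rmap in the slot up front, at the cost of applying write protection to all memslots rather than only the one being logged.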