@@ -229,7 +229,8 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
@@ -361,7 +361,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
@@ -937,7 +937,8 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
 /* Emulation */
@@ -579,9 +579,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	return pte_young(*gpa_pte);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush)
 {
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+	int young = handle_hva_to_gpa(kvm, start, end,
+				      kvm_age_hva_handler, NULL);
+
+	if (flush && young > 0)
+		kvm_flush_remote_tlbs(kvm);
+
+	return young;
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
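
The MIPS change above sets the shape that every architecture below repeats: age the range first, then flush remote TLBs only if something was actually young. As a minimal sketch of that shared pattern, with arch_age_range() standing in for the per-arch page-table walker (a hypothetical name, not a real kernel function):

int arch_age_range(struct kvm *kvm, unsigned long start, unsigned long end);

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
		bool flush)
{
	/* Hypothetical helper; each arch plugs in its own range walker. */
	int young = arch_age_range(kvm, start, end);

	/* Skip the expensive remote TLB flush when nothing was young. */
	if (flush && young > 0)
		kvm_flush_remote_tlbs(kvm);

	return young;
}
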
@@ -70,7 +70,8 @@
 extern int kvm_unmap_hva_range(struct kvm *kvm,
 			       unsigned long start, unsigned long end);
-extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		       bool flush);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -841,9 +841,15 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush)
 {
-	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
+	int young = kvm->arch.kvm_ops->age_hva(kvm, start, end);
+
+	if (flush && young > 0)
+		kvm_flush_remote_tlbs(kvm);
+
+	return young;
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
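
Worth noting: this wrapper keeps the flush decision in common powerpc code. The kvm_ops->age_hva() backends only report whether anything was young, and the new handling wraps them unchanged.
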
@@ -745,7 +745,8 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 	return 0;
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush)
 {
 	/* XXX could be more clever ;) */
 	return 0;
@@ -1519,7 +1519,8 @@ asmlinkage void kvm_spurious_fault(void);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
@@ -1977,9 +1977,15 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 			KVM_PAGES_PER_HPAGE(sp->role.level));
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush)
 {
-	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
+	int young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
+
+	if (flush && young > 0)
+		kvm_flush_remote_tlbs(kvm);
+
+	return young;
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
@@ -2107,12 +2107,21 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
 	return pte_young(*pte);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end,
+		bool flush)
 {
+	int young;
+
 	if (!kvm->arch.pgd)
 		return 0;
 	trace_kvm_age_hva(start, end);
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+
+	young = handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+
+	if (flush && young > 0)
+		kvm_flush_remote_tlbs(kvm);
+
+	return young;
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
@@ -428,9 +428,7 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
 
-	young = kvm_age_hva(kvm, start, end);
-	if (young)
-		kvm_flush_remote_tlbs(kvm);
+	young = kvm_age_hva(kvm, start, end, true);
 
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
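
This call-site change is behaviour-preserving: young is never negative, so kvm_age_hva(kvm, start, end, true) flushes in exactly the cases the removed open-coded "if (young) kvm_flush_remote_tlbs(kvm);" did.
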
@@ -461,7 +459,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 	 * cadence. If we find this inaccurate, we might come up with a
 	 * more sophisticated heuristic later.
 	 */
-	young = kvm_age_hva(kvm, start, end);
+	young = kvm_age_hva(kvm, start, end, false);
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
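
Taken together, the new contract could be captured by a kernel-doc comment along these lines (a suggested sketch, not part of the patch itself):

/**
 * kvm_age_hva - clear the accessed state of guest pages in an hva range
 * @kvm:	the VM whose mappings are aged
 * @start:	start of the host virtual address range
 * @end:	end of the host virtual address range
 * @flush:	flush remote TLBs if any page in the range was young
 *
 * Called with kvm->mmu_lock held from the MMU notifiers; the
 * clear_flush_young notifier passes flush == true, clear_young
 * passes flush == false.  Returns a value greater than zero when
 * at least one page in the range was young.
 */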