@@ -1026,6 +1026,9 @@ struct kvm_arch {
struct kvm_pmu_event_filter *pmu_event_filter;
struct task_struct *nx_lpage_recovery_thread;
+
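+ /* vCPUs that still have to restore the default EPT view on unhook */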
+ refcount_t kvmi_refcount;
};
struct kvm_vm_stat {
@@ -640,6 +640,28 @@ static void kvmi_arch_restore_interception(struct kvm_vcpu *vcpu)
kvmi_arch_disable_msrw_intercept(vcpu, arch_vcpui->msrw.kvmi_mask.high);
}
+void kvmi_arch_restore_ept_view(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u16 view, default_view = 0;
+ bool visible = false;
+
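+ /* Switch this vCPU back to the default EPT view. */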
+ if (kvm_get_ept_view(vcpu) != default_view)
+ kvmi_arch_cmd_set_ept_view(vcpu, default_view);
+
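+ /* Mark every EPT view as not visible to the guest. */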
+ for (view = 0; view < KVM_MAX_EPT_VIEWS; view++)
+ kvmi_arch_cmd_control_ept_view(vcpu, view, visible);
+
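+ /* The last vCPU restored zaps the pages of every non-default view. */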
+ if (refcount_dec_and_test(&kvm->arch.kvmi_refcount)) {
+ u16 zap_mask = ~(1 << default_view);
+
+ kvm_mmu_zap_all(kvm, zap_mask);
+ }
+}
+
bool kvmi_arch_clean_up_interception(struct kvm_vcpu *vcpu)
{
struct kvmi_interception *arch_vcpui = vcpu->arch.kvmi;
@@ -647,8 +669,10 @@ bool kvmi_arch_clean_up_interception(struct kvm_vcpu *vcpu)
if (!arch_vcpui || !arch_vcpui->cleanup)
return false;
- if (arch_vcpui->restore_interception)
+ if (arch_vcpui->restore_interception) {
kvmi_arch_restore_interception(vcpu);
+ kvmi_arch_restore_ept_view(vcpu);
+ }
return true;
}
@@ -288,6 +288,10 @@ static void free_kvmi(struct kvm *kvm)
kvmi_clear_mem_access(kvm);
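+ /* Each vCPU decrements this when its EPT view is restored. */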
+ refcount_set(&kvm->arch.kvmi_refcount,
+ atomic_read(&kvm->online_vcpus));
+
kvm_for_each_vcpu(i, vcpu, kvm)
free_vcpui(vcpu, restore_interception);