diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1559,6 +1559,7 @@ struct kvm_x86_nested_ops {
int (*enable_evmcs)(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
+ void (*post_hv_direct_flush)(struct kvm_vcpu *vcpu);
};
struct kvm_x86_init_ops {
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -25,7 +25,8 @@ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
-kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
+ svm/sev.o svm/hyperv.o
ifdef CONFIG_HYPERV
kvm-amd-y += svm/svm_onhyperv.o
diff --git a/arch/x86/kvm/svm/hyperv.c b/arch/x86/kvm/svm/hyperv.c
new file mode 100644
--- /dev/null
+++ b/arch/x86/kvm/svm/hyperv.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD SVM specific code for Hyper-V on KVM.
+ *
+ * Copyright 2022 Red Hat, Inc. and/or its affiliates.
+ */
+#include "hyperv.h"
+
+void svm_post_hv_direct_flush(struct kvm_vcpu *vcpu)
+{
+}
diff --git a/arch/x86/kvm/svm/hyperv.h b/arch/x86/kvm/svm/hyperv.h
--- a/arch/x86/kvm/svm/hyperv.h
+++ b/arch/x86/kvm/svm/hyperv.h
@@ -48,4 +48,6 @@ static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
hv_vcpu->nested.vp_id = hve->hv_vp_id;
}
+void svm_post_hv_direct_flush(struct kvm_vcpu *vcpu);
+
#endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1668,4 +1668,5 @@ struct kvm_x86_nested_ops svm_nested_ops = {
.get_nested_state_pages = svm_get_nested_state_pages,
.get_state = svm_get_nested_state,
.set_state = svm_set_nested_state,
+ .post_hv_direct_flush = svm_post_hv_direct_flush,
};
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -437,3 +437,7 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
return 0;
}
+
+void vmx_post_hv_direct_flush(struct kvm_vcpu *vcpu)
+{
+}
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -244,5 +244,6 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
+void vmx_post_hv_direct_flush(struct kvm_vcpu *vcpu);
#endif /* __KVM_X86_VMX_EVMCS_H */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6827,4 +6827,5 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
.write_log_dirty = nested_vmx_write_pml_buffer,
.enable_evmcs = nested_enable_evmcs,
.get_evmcs_version = nested_get_evmcs_version,
+ .post_hv_direct_flush = vmx_post_hv_direct_flush,
};
Hyper-V supports injecting a synthetic L2->L1 exit after performing a Direct
TLB flush operation, but the procedure is vendor specific. Introduce a
.post_hv_direct_flush() nested hook for it.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/Makefile           |  3 ++-
 arch/x86/kvm/svm/hyperv.c       | 11 +++++++++++
 arch/x86/kvm/svm/hyperv.h       |  2 ++
 arch/x86/kvm/svm/nested.c       |  1 +
 arch/x86/kvm/vmx/evmcs.c        |  4 ++++
 arch/x86/kvm/vmx/evmcs.h        |  1 +
 arch/x86/kvm/vmx/nested.c       |  1 +
 8 files changed, 23 insertions(+), 1 deletion(-)
 create mode 100644 arch/x86/kvm/svm/hyperv.c
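
For context, a minimal sketch of how a follow-up change could invoke the new
nested op from the common Hyper-V code once a Direct TLB flush has been
serviced for a vCPU running an L2 guest. The helper name
kvm_hv_post_direct_flush() and its call site are illustrative assumptions and
are not part of this patch; only the hook itself (stubbed out for both VMX and
SVM) is introduced here.

/*
 * Illustrative sketch only: this helper and its placement are assumptions,
 * not part of this patch. It shows the intended use of the vendor-specific
 * .post_hv_direct_flush() nested op.
 */
static void kvm_hv_post_direct_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * A synthetic L2->L1 exit only makes sense while the vCPU is running
	 * a nested (L2) guest; the injection procedure itself is vendor
	 * specific, which is why it is hidden behind the nested op.
	 */
	if (is_guest_mode(vcpu) && kvm_x86_ops.nested_ops->post_hv_direct_flush)
		kvm_x86_ops.nested_ops->post_hv_direct_flush(vcpu);
}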