@@ -8,4 +8,11 @@
void svm_post_hv_direct_flush(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ svm->vmcb->control.exit_code = HV_SVM_EXITCODE_ENL;
+ svm->vmcb->control.exit_code_hi = 0;
+ svm->vmcb->control.exit_info_1 = HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH;
+ svm->vmcb->control.exit_info_2 = 0;
+ nested_svm_vmexit(svm);
}
@@ -33,6 +33,9 @@ struct hv_enlightenments {
*/
#define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
+#define HV_SVM_EXITCODE_ENL 0xF0000000
+#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
+
static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -48,6 +51,22 @@ static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
hv_vcpu->nested.vp_id = hve->hv_vp_id;
}
+static inline bool nested_svm_direct_flush_enabled(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
+ struct hv_vp_assist_page assist_page;
+
+ if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
+ return false;
+
+ if (!hve->hv_enlightenments_control.nested_flush_hypercall)
+ return false;
+
+ return assist_page.nested_control.features.directhypercall;
+}
+
void svm_post_hv_direct_flush(struct kvm_vcpu *vcpu);
#endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */
@@ -170,7 +170,8 @@ void recalc_intercepts(struct vcpu_svm *svm)
}
/* We don't want to see VMMCALLs from a nested guest */
- vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
+ if (!nested_svm_direct_flush_enabled(&svm->vcpu))
+ vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i];
@@ -486,6 +487,17 @@ static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
+ /*
+ * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
+ * L2's VP_ID upon request from the guest. Make sure we check for
+ * pending entries in case the request got misplaced (e.g. an L2->L1
+ * transition happened while a Direct TLB flush request was being
+ * processed, or vice versa). kvm_hv_vcpu_flush_tlb() will not flush
+ * anything if there are no requests in the corresponding buffer.
+ */
+ if (to_hv_vcpu(vcpu))
+ kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+
/*
* TODO: optimize unconditional TLB flush/MMU sync. A partial list of
* things to fix before this can be conditional:
@@ -1361,6 +1373,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
int nested_svm_exit_special(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
switch (exit_code) {
case SVM_EXIT_INTR:
@@ -1379,6 +1392,13 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
return NESTED_EXIT_HOST;
break;
}
+ case SVM_EXIT_VMMCALL:
+ /* Hyper-V Direct TLB flush hypercall is handled by L0 */
+ if (kvm_hv_direct_tlb_flush_exposed(vcpu) &&
+ nested_svm_direct_flush_enabled(vcpu) &&
+ kvm_hv_is_tlb_flush_hcall(vcpu))
+ return NESTED_EXIT_HOST;
+ break;
default:
break;
}
Implement the Hyper-V Direct TLB flush feature for nSVM. The feature
needs to be enabled both in the extended 'nested controls' in the VMCB
and in the partition assist page. According to the TLFS, the synthetic
vmexit to L1 is performed with
- HV_SVM_EXITCODE_ENL exit_code.
- HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH exit_info_1.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/kvm/svm/hyperv.c |  7 +++++++
 arch/x86/kvm/svm/hyperv.h | 19 +++++++++++++++++++
 arch/x86/kvm/svm/nested.c | 22 +++++++++++++++++++++-
 3 files changed, 47 insertions(+), 1 deletion(-)
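For reference, a minimal L1-side sketch of how a nested hypervisor could
opt in to and consume this feature. This is illustrative only and not
part of the patch: it assumes the definitions used by this series
(struct hv_enlightenments, HV_SVM_EXITCODE_ENL,
HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH) plus the kernel's <asm/svm.h> and
<asm/hyperv-tlfs.h> types; the l1_* helper names are made up:

/* Opt-in: per the TLFS, both enablement bits must be set. */
static void l1_enable_direct_tlb_flush(struct vmcb *vmcb,
				       struct hv_vp_assist_page *assist_page)
{
	struct hv_enlightenments *hve =
		(struct hv_enlightenments *)vmcb->control.reserved_sw;

	/* Bit one: extended 'nested controls' in the VMCB... */
	hve->hv_enlightenments_control.nested_flush_hypercall = 1;

	/* ...bit two: the partition assist page. */
	assist_page->nested_control.features.directhypercall = 1;
}

/*
 * Exit handling: when L0 handles an L2 TLB flush hypercall directly,
 * L1 only sees the synthetic 'trap after flush' exit and has nothing
 * left to emulate.
 */
static bool l1_is_synthetic_flush_exit(struct vmcb *vmcb)
{
	return vmcb->control.exit_code == HV_SVM_EXITCODE_ENL &&
	       vmcb->control.exit_info_1 ==
			HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH;
}

With both bits set, nested_svm_direct_flush_enabled() returns true, so
recalc_intercepts() keeps VMMCALL intercepted and
nested_svm_exit_special() routes L2's TLB flush hypercalls to L0
instead of reflecting them to L1.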