@@ -484,19 +484,35 @@ static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
static void nested_svm_entry_tlb_flush(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
/* Handle pending Hyper-V TLB flush requests */
kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
+ /*
+ * If L1 requested a TLB flush for L2, flush L2's TLB on nested entry
+ * and sync the nested NPT MMU, as TLB_CONTROL also flushes NPT
+ * guest-physical mappings. We technically only need to flush guest_mode
+ * page tables.
+ *
+ * If L1 requested a full TLB flush for all ASIDs, L1's own ASID is also
+ * flushed in nested_svm_exit_tlb_flush() before running L1.
+ *
+ * Note that TLB_CONTROL_FLUSH_ASID_LOCAL is handled exactly like
+ * TLB_CONTROL_FLUSH_ASID. We could technically flush fewer TLB entries,
+ * but this would require significantly more complexity.
+ */
+ if (svm->nested.ctl.tlb_ctl != TLB_CONTROL_DO_NOTHING) {
+ if (nested_npt_enabled(svm))
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+ }
+
/*
* TODO: optimize unconditional TLB flush/MMU sync. A partial list of
* things to fix before this can be conditional:
*
- * - Honor L1's request to flush an ASID on nested VMRUN
- * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
* - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
- *
- * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
- * NPT guest-physical mappings on VMRUN.
*/
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
@@ -505,8 +521,13 @@ static void nested_svm_entry_tlb_flush(struct kvm_vcpu *vcpu)
/* See nested_svm_entry_tlb_flush() */
static void nested_svm_exit_tlb_flush(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
+ if (svm->nested.ctl.tlb_ctl == TLB_CONTROL_FLUSH_ALL_ASID)
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
@@ -5332,9 +5332,8 @@ static __init void svm_set_cpu_caps(void)
kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
/*
- * KVM currently flushes TLBs on *every* nested SVM transition,
- * and so for all intents and purposes KVM supports flushing by
- * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
+ * KVM handles all TLB_CONTROL values set by L1, even if the
+ * underlying CPU does not. See nested_svm_entry_tlb_flush().
*/
kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID);