--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -368,7 +368,6 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
to->pause_filter_count = from->pause_filter_count;
to->pause_filter_thresh = from->pause_filter_thresh;

- /* Copy asid here because nested_vmcb_check_controls will check it. */
to->asid = from->asid;
to->msrpm_base_pa &= ~0x0fffULL;
to->iopm_base_pa &= ~0x0fffULL;
@@ -508,6 +507,14 @@ static void nested_svm_entry_tlb_flush(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
}
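+ /*
+ * If L1 assigned L2 a new ASID, flush the guest TLB: KVM does not tag
+ * entries with the vmcb12 ASID, so stale translations could be reused.
+ */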
+ if (svm->nested.ctl.asid != svm->nested.last_asid) {
+ svm->nested.last_asid = svm->nested.ctl.asid;
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+ }

/*
* TODO: optimize unconditional TLB flush/MMU sync. A partial list of
* things to fix before this can be conditional:
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -212,6 +212,9 @@ struct svm_nested_state {
* on its side.
*/
bool force_msr_bitmap_recalc;
+
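+ /* vmcb12 ASID on the previous nested VMRUN, to detect ASID changes by L1. */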
+ u32 last_asid;
};

struct vcpu_sev_es_state {