@@ -491,7 +491,6 @@ static void nested_svm_entry_tlb_flush(struct kvm_vcpu *vcpu)
 	 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
 	 * things to fix before this can be conditional:
 	 *
-	 * - Flush TLBs for both L1 and L2 remote TLB flush
 	 * - Honor L1's request to flush an ASID on nested VMRUN
 	 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
 	 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
@@ -4044,7 +4044,9 @@ static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
 	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
 		hv_flush_remote_tlbs(vcpu->kvm);
 
-	svm_flush_tlb_asid(vcpu, svm->current_vmcb);
+	svm_flush_tlb_asid(vcpu, &svm->vmcb01);
+	if (svm->nested.initialized)
+		svm_flush_tlb_asid(vcpu, &svm->nested.vmcb02);
 }
 
 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
KVM_REQ_TLB_FLUSH is used to flush all TLB entries for all contexts
(e.g. in kvm_flush_remote_tlbs()). Flush both L1 and L2 ASIDs in
svm_flush_tlb_all() to handle it appropriately. This is currently not
required as nested transitions do unconditional TLB flushes, but this is
a step toward eliminating that.

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 1 -
 arch/x86/kvm/svm/svm.c    | 4 +++-
 2 files changed, 3 insertions(+), 2 deletions(-)
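
For illustration, the reason a single svm_flush_tlb_asid() call on the
current vmcb is insufficient here: KVM_REQ_TLB_FLUSH, raised on every
vCPU by kvm_flush_remote_tlbs(), is supposed to invalidate translations
for all contexts, but flushing only the current vmcb leaves the other
context's ASID live. The sketch below is a minimal, self-contained
userspace model of SVM's ASID-generation scheme, in which flushing an
ASID decrements the vmcb's generation so the next run sees a mismatch
and assigns a fresh ASID. The struct layouts and helper names here are
illustrative only, not the kernel's.

#include <stdio.h>

/* Minimal model of per-vmcb ASID generations; names are illustrative. */
struct vmcb_info {
	unsigned int asid;
	unsigned long asid_generation;
};

struct svm_cpu {
	unsigned long asid_generation;	/* current generation on this pCPU */
	unsigned int next_asid;		/* next free ASID to hand out */
};

/*
 * "Flushing" an ASID: invalidate the vmcb's generation so the next run
 * sees a mismatch and allocates a fresh ASID; stale TLB entries tagged
 * with the old ASID are then never used again.
 */
static void flush_tlb_asid(struct vmcb_info *vmcb)
{
	vmcb->asid_generation--;
}

/* Model of the pre-run check: a stale generation gets a new ASID. */
static void pre_run(struct svm_cpu *cpu, struct vmcb_info *vmcb)
{
	if (vmcb->asid_generation != cpu->asid_generation) {
		vmcb->asid = cpu->next_asid++;
		vmcb->asid_generation = cpu->asid_generation;
	}
}

int main(void)
{
	struct svm_cpu cpu = { .asid_generation = 1, .next_asid = 1 };
	struct vmcb_info vmcb01 = { 0 };	/* L1's context */
	struct vmcb_info vmcb02 = { 0 };	/* L2's context */

	pre_run(&cpu, &vmcb01);
	pre_run(&cpu, &vmcb02);
	printf("L1 ASID %u, L2 ASID %u\n", vmcb01.asid, vmcb02.asid);

	/*
	 * A full flush must cover both contexts, as the patch does;
	 * flushing only the "current" vmcb would leave the other
	 * context's ASID (and its TLB entries) intact.
	 */
	flush_tlb_asid(&vmcb01);
	flush_tlb_asid(&vmcb02);
	pre_run(&cpu, &vmcb01);
	pre_run(&cpu, &vmcb02);
	printf("after flush: L1 ASID %u, L2 ASID %u\n",
	       vmcb01.asid, vmcb02.asid);
	return 0;
}

Note also the svm->nested.initialized guard in the patch: vmcb02 only
exists once nested state has been allocated, so a vCPU that never ran an
L2 guest skips the second flush entirely.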