@@ -2313,7 +2313,7 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 		return;
 
 	if (local_flush)
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
 
 #ifdef CONFIG_KVM_MMU_AUDIT
@@ -2520,11 +2520,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				break;
 
 			WARN_ON(!list_empty(&invalid_list));
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 		}
 
 		if (sp->unsync_children)
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
@@ -3125,7 +3125,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
 		if (write_fault)
 			ret = RET_PF_EMULATE;
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 	}
 
 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
@@ -4314,7 +4314,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 			if (!skip_tlb_flush) {
 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-				kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+				kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 			}
 
 			/*
@@ -5177,7 +5177,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	kvm_mmu_load_pgd(vcpu);
-	kvm_x86_ops->tlb_flush_all(vcpu);
+	kvm_x86_ops->tlb_flush_current(vcpu);
 out:
 	return r;
 }
Flush only the current ASID/context when requesting a TLB flush due to
a change in the current vCPU's MMU to avoid blasting away TLB entries
associated with other ASIDs/contexts, e.g. entries cached for L1 when
a change in L2's MMU requires a flush.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
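For reference, a minimal sketch of how the two requests differ when they
are serviced. The function below is hypothetical (in the real code the
checks live in the request-processing path in arch/x86/kvm/x86.c);
kvm_check_request() and the tlb_flush_all()/tlb_flush_current() hooks
are the actual interfaces, everything else is illustrative:

/*
 * Hypothetical helper, for illustration only: shows what each request
 * ends up doing, not the actual request-handling code.
 */
static void vcpu_service_tlb_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * KVM_REQ_TLB_FLUSH blasts away all TLB entries for this vCPU,
	 * including entries cached for other ASIDs/contexts, e.g. L1's
	 * entries while L2 is running.
	 */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvm_x86_ops->tlb_flush_all(vcpu);

	/*
	 * KVM_REQ_TLB_FLUSH_CURRENT flushes only the currently loaded
	 * ASID/context, so translations cached for other contexts
	 * survive.
	 */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_x86_ops->tlb_flush_current(vcpu);
}

The payoff is that tlb_flush_current() can be backed by a
single-context invalidation, e.g. a single-context INVEPT/INVVPID on
VMX, rather than a global one.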