diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -240,7 +240,7 @@ static void sev_asid_free(struct kvm_sev_info *sev)
 
 	for_each_possible_cpu(cpu) {
 		sd = per_cpu_ptr(&svm_data, cpu);
-		sd->sev_vmcbs[sev->asid] = NULL;
+		sd->sev_vcpus[sev->asid] = NULL;
 	}
 
 	mutex_unlock(&sev_bitmap_lock);
@@ -3081,8 +3081,8 @@ int sev_cpu_init(struct svm_cpu_data *sd)
 	if (!sev_enabled)
 		return 0;
 
-	sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
-	if (!sd->sev_vmcbs)
+	sd->sev_vcpus = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
+	if (!sd->sev_vcpus)
 		return -ENOMEM;
 
 	return 0;
@@ -3471,14 +3471,14 @@ int pre_sev_run(struct vcpu_svm *svm, int cpu)
 	/*
 	 * Flush guest TLB:
 	 *
-	 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
+	 * 1) when different vCPU for the same ASID is to be run on the same host CPU.
 	 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
 	 */
-	if (sd->sev_vmcbs[asid] == svm->vmcb &&
+	if (sd->sev_vcpus[asid] == &svm->vcpu &&
 	    svm->vcpu.arch.last_vmentry_cpu == cpu)
 		return 0;
 
-	sd->sev_vmcbs[asid] = svm->vmcb;
+	sd->sev_vcpus[asid] = &svm->vcpu;
 	vmcb_set_flush_asid(svm->vmcb);
 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 	return 0;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -694,7 +694,7 @@ static void svm_cpu_uninit(int cpu)
 	if (!sd->save_area)
 		return;
 
-	kfree(sd->sev_vmcbs);
+	kfree(sd->sev_vcpus);
 	__free_page(__sme_pa_to_page(sd->save_area_pa));
 	sd->save_area_pa = 0;
 	sd->save_area = NULL;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -340,8 +340,8 @@ struct svm_cpu_data {
 
 	struct vmcb *current_vmcb;
 
-	/* index = sev_asid, value = vmcb pointer */
-	struct vmcb **sev_vmcbs;
+	/* index = sev_asid, value = vcpu pointer */
+	struct kvm_vcpu **sev_vcpus;
 };
 
 DECLARE_PER_CPU(struct svm_cpu_data, svm_data);
SEV currently tracks the ASID to VMCB mapping for each physical CPU. This
is required to flush the ASID when a new VMCB using the same ASID is run
on the same CPU. Practically, there is a single VMCB for each vCPU using
SEV. Furthermore, TLB flushes on nested transitions between VMCB01 and
VMCB02 are handled separately (see nested_svm_transition_tlb_flush()).

In preparation for generalizing the tracking and making it more
expensive, start tracking the ASID to vCPU mapping instead. This will
allow the tracking to be moved to a cheaper code path when vCPUs are
switched.

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/sev.c | 12 ++++++------
 arch/x86/kvm/svm/svm.c |  2 +-
 arch/x86/kvm/svm/svm.h |  4 ++--
 3 files changed, 9 insertions(+), 9 deletions(-)
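To make the flush rule concrete, here is a minimal userspace sketch of the
tracking scheme this patch moves to. It is an illustration only, not kernel
code: struct toy_vcpu, the sev_vcpus table layout, and the helpers
pre_run_needs_flush()/run() are invented stand-ins for struct kvm_vcpu,
svm_cpu_data.sev_vcpus, and pre_sev_run().

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS		4
#define NR_ASIDS	8

struct toy_vcpu {
	int id;
	unsigned int asid;	/* SEV ASIDs are per-VM, shared by its vCPUs */
	int last_run_cpu;	/* stands in for vcpu.arch.last_vmentry_cpu */
};

/* Per-CPU table: index = ASID, value = last vCPU to enter with that ASID. */
static struct toy_vcpu *sev_vcpus[NR_CPUS][NR_ASIDS];

/*
 * Mirrors the post-patch pre_sev_run() check: skip the flush only when the
 * same vCPU last entered the guest with this ASID on this very CPU;
 * otherwise record the new owner and request an ASID flush.
 */
static bool pre_run_needs_flush(struct toy_vcpu *v, int cpu)
{
	if (sev_vcpus[cpu][v->asid] == v && v->last_run_cpu == cpu)
		return false;

	sev_vcpus[cpu][v->asid] = v;
	return true;
}

static void run(struct toy_vcpu *v, int cpu)
{
	printf("vCPU %d on CPU %d: %s\n", v->id, cpu,
	       pre_run_needs_flush(v, cpu) ? "flush ASID" : "no flush");
	v->last_run_cpu = cpu;	/* the real code updates this at VM-entry */
}

int main(void)
{
	/* Two vCPUs of the same VM, hence sharing one ASID. */
	struct toy_vcpu a = { .id = 0, .asid = 1, .last_run_cpu = -1 };
	struct toy_vcpu b = { .id = 1, .asid = 1, .last_run_cpu = -1 };

	run(&a, 0);	/* first entry on CPU 0: flush */
	run(&a, 0);	/* same vCPU, same CPU: no flush */
	run(&a, 1);	/* migrated to CPU 1: flush there */
	run(&a, 0);	/* case 2: ran on another CPU in between: flush */
	run(&b, 0);	/* case 1: different vCPU, same ASID, same CPU: flush */
	return 0;
}

The last two entries correspond to the two cases in the pre_sev_run()
comment, and both behave the same whether the table stores VMCBs or vCPUs
as long as each vCPU has a single VMCB, which is why the switch is safe
here; per the commit message, nested VMCB01/VMCB02 transitions get their
flushes from nested_svm_transition_tlb_flush() instead.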