Fold the IRTE modification logic in avic_refresh_apicv_exec_ctrl() into
__avic_vcpu_{load,put}(), and add a param to the helpers to communicate
whether or not AVIC is being toggled, i.e. if IRTE needs a "full" update,
or just a quick update to set the CPU and IsRun.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/avic.c | 55 ++++++++++++++---------------------------
 1 file changed, 19 insertions(+), 36 deletions(-)

@@ -820,7 +820,8 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
return irq_set_vcpu_affinity(host_irq, NULL);
}
-static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
+static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
+ bool toggle_avic)
{
struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
@@ -834,11 +835,17 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
if (list_empty(&svm->ir_list))
return;
- list_for_each_entry(ir, &svm->ir_list, node)
- WARN_ON_ONCE(amd_iommu_update_ga(cpu, ir->data));
+ list_for_each_entry(ir, &svm->ir_list, node) {
+ if (!toggle_avic)
+ WARN_ON_ONCE(amd_iommu_update_ga(cpu, ir->data));
+ else if (cpu >= 0)
+ WARN_ON_ONCE(amd_iommu_activate_guest_mode(ir->data, cpu));
+ else
+ WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(ir->data));
+ }
}
-static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool toggle_avic)
{
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -883,7 +890,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
- avic_update_iommu_vcpu_affinity(vcpu, h_physical_id);
+ avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, toggle_avic);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
@@ -900,10 +907,10 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (kvm_vcpu_is_blocking(vcpu))
return;
- __avic_vcpu_load(vcpu, cpu);
+ __avic_vcpu_load(vcpu, cpu, false);
}
-static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu, bool toggle_avic)
{
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
@@ -925,7 +932,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
- avic_update_iommu_vcpu_affinity(vcpu, -1);
+ avic_update_iommu_vcpu_affinity(vcpu, -1, toggle_avic);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
svm->avic_physical_id_entry = entry;
@@ -951,7 +958,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
return;
- __avic_vcpu_put(vcpu);
+ __avic_vcpu_put(vcpu, false);
}
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -980,39 +987,15 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
- bool activated = kvm_vcpu_apicv_active(vcpu);
- int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
- struct vcpu_svm *svm = to_svm(vcpu);
- struct amd_svm_iommu_ir *ir;
- unsigned long flags;
-
if (!enable_apicv)
return;
avic_refresh_virtual_apic_mode(vcpu);
- if (activated)
- __avic_vcpu_load(vcpu, vcpu->cpu);
+ if (kvm_vcpu_apicv_active(vcpu))
+ __avic_vcpu_load(vcpu, vcpu->cpu, true);
else
- __avic_vcpu_put(vcpu);
-
- /*
- * Here, we go through the per-vcpu ir_list to update all existing
- * interrupt remapping table entry targeting this vcpu.
- */
- spin_lock_irqsave(&svm->ir_list_lock, flags);
-
- if (list_empty(&svm->ir_list))
- goto out;
-
- list_for_each_entry(ir, &svm->ir_list, node) {
- if (activated)
- WARN_ON_ONCE(amd_iommu_activate_guest_mode(ir->data, apic_id));
- else
- WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(ir->data));
- }
-out:
- spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+ __avic_vcpu_put(vcpu, true);
}
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
Fold the IRTE modification logic in avic_refresh_apicv_exec_ctrl() into __avic_vcpu_{load,put}(), and add a param to the helpers to communicate whether or not AVIC is being toggled, i.e. if IRTE needs a "full" update, or just a quick update to set the CPU and IsRun. Signed-off-by: Sean Christopherson <seanjc@google.com> --- arch/x86/kvm/svm/avic.c | 55 ++++++++++++++--------------------------- 1 file changed, 19 insertions(+), 36 deletions(-)