@@ -842,7 +842,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
entry = svm->avic_physical_id_entry;
if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
- true, pi_data.ir_data);
+ pi_data.ir_data);
irqfd->irq_bypass_data = pi_data.ir_data;
list_add(&irqfd->vcpu_list, &svm->ir_list);
@@ -851,8 +851,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
return irq_set_vcpu_affinity(host_irq, NULL);
}
-static inline int
-avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+static inline int avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
{
int ret = 0;
struct amd_svm_iommu_ir *ir;
@@ -871,7 +870,7 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
return 0;
list_for_each_entry(ir, &svm->ir_list, node) {
- ret = amd_iommu_update_ga(cpu, r, ir->data);
+ ret = amd_iommu_update_ga(cpu, ir->data);
if (ret)
return ret;
}
@@ -933,7 +932,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
- avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
+ avic_update_iommu_vcpu_affinity(vcpu, h_physical_id);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
@@ -973,7 +972,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
- avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+ avic_update_iommu_vcpu_affinity(vcpu, -1);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
svm->avic_physical_id_entry = entry;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3957,7 +3957,7 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
return 0;
}
-int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+int amd_iommu_update_ga(int cpu, void *data)
{
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
@@ -3974,8 +3974,10 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
APICID_TO_IRTE_DEST_LO(cpu);
entry->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(cpu);
+ entry->lo.fields_vapic.is_run = true;
+ } else {
+ entry->lo.fields_vapic.is_run = false;
}
- entry->lo.fields_vapic.is_run = is_run;
return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry);
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -30,8 +30,7 @@ static inline void amd_iommu_detect(void) { }
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
+extern int amd_iommu_update_ga(int cpu, void *data);
extern int amd_iommu_activate_guest_mode(void *data);
extern int amd_iommu_deactivate_guest_mode(void *data);
@@ -44,8 +43,7 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
return 0;
}
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(int cpu, void *data)
{
return 0;
}
Infer whether or not a vCPU should be marked running from the validity of
the pCPU on which it is running.  amd_iommu_update_ga() already skips the
IRTE update if the pCPU is invalid, i.e. passing %true for is_run with an
invalid pCPU would be a blatant and egregious KVM bug.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/avic.c   | 11 +++++------
 drivers/iommu/amd/iommu.c |  6 ++++--
 include/linux/amd-iommu.h |  6 ++----
 3 files changed, 11 insertions(+), 12 deletions(-)
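
For reviewers, below is a minimal userspace sketch of the contract this patch
establishes: callers encode "not running" as an invalid pCPU (-1, as
avic_vcpu_put() does), a valid pCPU (as avic_vcpu_load() passes) implies
IsRun=1, and the update helper infers IsRun from pCPU validity.  The fake_*
names are hypothetical stand-ins, not the kernel's real irte_ga /
amd_iommu_update_ga() plumbing; only the control flow of the patched
function is modeled.

	/* Standalone sketch; compiles with any C99 compiler. */
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the IsRun/destination bits of irte_ga. */
	struct fake_irte {
		unsigned int destination;
		bool is_run;
	};

	/* Mirrors the patched amd_iommu_update_ga() control flow. */
	static int fake_update_ga(int cpu, struct fake_irte *entry)
	{
		if (cpu >= 0) {
			/* Valid pCPU: retarget the entry and mark running. */
			entry->destination = (unsigned int)cpu;
			entry->is_run = true;
		} else {
			/* Invalid pCPU: vCPU is not running; destination is
			 * left untouched, just as the real code skips it. */
			entry->is_run = false;
		}
		return 0;
	}

	int main(void)
	{
		struct fake_irte irte = { 0 };

		fake_update_ga(3, &irte);	/* avic_vcpu_load(): pCPU 3 */
		printf("loaded: is_run=%d dest=%u\n", irte.is_run, irte.destination);

		fake_update_ga(-1, &irte);	/* avic_vcpu_put(): pCPU -1 */
		printf("put:    is_run=%d dest=%u\n", irte.is_run, irte.destination);
		return 0;
	}

The sketch also shows why the dropped is_run parameter was redundant: as the
diff's call sites illustrate, the only combinations ever passed were a valid
pCPU with %true and -1 with %false, so the boolean carried no information
beyond the pCPU's validity.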