--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -35,6 +35,7 @@ struct amd_iommu_pi_data {
u64 vapic_addr; /* Physical address of the vCPU's vAPIC. */
u32 ga_tag;
u32 vector; /* Guest vector of the interrupt */
+ int cpu;
bool is_guest_mode;
void *ir_data;
};
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -735,6 +735,7 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
{
+ int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
int ret = 0;
unsigned long flags;
struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
@@ -754,7 +755,7 @@ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
list_for_each_entry(ir, &svm->ir_list, node) {
if (activate)
- ret = amd_iommu_activate_guest_mode(ir->data);
+ ret = amd_iommu_activate_guest_mode(ir->data, apic_id);
else
ret = amd_iommu_deactivate_guest_mode(ir->data);
if (ret)
break;
@@ -819,6 +820,18 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
*/
guard(spinlock_irqsave)(&svm->ir_list_lock);

+ /*
+ * Update the target pCPU for IOMMU doorbells if the vCPU is
+ * running. If the vCPU is NOT running, i.e. is blocking or
+ * scheduled out, KVM will update the pCPU info when the vCPU
+ * is awakened and/or scheduled in. See also avic_vcpu_load().
+ */
+ entry = svm->avic_physical_id_entry;
+ if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
+ pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+ else
+ pi_data.cpu = -1;
+
ret = irq_set_vcpu_affinity(host_irq, &pi_data);
if (ret)
return ret;
@@ -833,17 +846,6 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
return -EIO;
}

- /*
- * Update the target pCPU for IOMMU doorbells if the vCPU is
- * running. If the vCPU is NOT running, i.e. is blocking or
- * scheduled out, KVM will update the pCPU info when the vCPU
- * is awakened and/or scheduled in. See also avic_vcpu_load().
- */
- entry = svm->avic_physical_id_entry;
- if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
- amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
- pi_data.ir_data);
-
irqfd->irq_bypass_data = pi_data.ir_data;
list_add(&irqfd->vcpu_list, &svm->ir_list);
return 0;
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3807,7 +3807,7 @@ int amd_iommu_update_ga(int cpu, void *data)
}
EXPORT_SYMBOL(amd_iommu_update_ga);

-int amd_iommu_activate_guest_mode(void *data)
+int amd_iommu_activate_guest_mode(void *data, int cpu)
{
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
@@ -3828,6 +3828,8 @@ int amd_iommu_activate_guest_mode(void *data)
entry->hi.fields.vector = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;

+ __amd_iommu_update_ga(entry, cpu);
+
return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry);
}
@@ -3894,7 +3896,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info)
ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12);
ir_data->ga_vector = pi_data->vector;
ir_data->ga_tag = pi_data->ga_tag;
- ret = amd_iommu_activate_guest_mode(ir_data);
+ ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu);
} else {
ret = amd_iommu_deactivate_guest_mode(ir_data);
}
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -32,7 +32,7 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));

extern int amd_iommu_update_ga(int cpu, void *data);

-extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu);
extern int amd_iommu_deactivate_guest_mode(void *data);

#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
@@ -48,7 +48,7 @@ static inline int amd_iommu_update_ga(int cpu, void *data)
return 0;
}

-static inline int amd_iommu_activate_guest_mode(void *data)
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu)
{
return 0;
}
Now that setting vCPU affinity is guarded with ir_list_lock, i.e. now
that avic_physical_id_entry can be safely accessed, set the pCPU info
straightaway when setting vCPU affinity.  Putting the IRTE into posted
mode and then immediately updating the IRTE a second time if the target
vCPU is running is wasteful and confusing.

This also fixes a flaw where a posted IRQ that arrives between putting
the IRTE into guest_mode and setting the correct destination could
cause the IOMMU to ring the doorbell on the wrong pCPU.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/irq_remapping.h |  1 +
 arch/x86/kvm/svm/avic.c              | 26 ++++++++++++++------------
 drivers/iommu/amd/iommu.c            |  6 ++++--
 include/linux/amd-iommu.h            |  4 ++--
 4 files changed, 21 insertions(+), 16 deletions(-)
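As an aside for reviewers, the window being closed is easiest to see in a
stand-alone sketch. The toy program below is purely illustrative (made-up
types and names, plain user-space C, not the kernel code): it models an
IOMMU that samples the IRTE once per incoming interrupt, and shows how the
old two-write sequence can ring the doorbell on a stale pCPU, while a
single combined write cannot.

	/*
	 * Toy model of the race (hypothetical names, illustration only).
	 * The "IOMMU" reads the IRTE once per interrupt; if guest_mode is
	 * published before the destination, an interrupt landing in
	 * between is posted to whatever stale pCPU the entry still holds.
	 */
	#include <stdio.h>

	struct toy_irte {
		unsigned int guest_mode : 1;	/* post to a vCPU? */
		unsigned int dest_apic  : 8;	/* pCPU whose doorbell is rung */
	};

	/* What the "IOMMU" does when a device interrupt arrives. */
	static void toy_post_irq(const struct toy_irte *e)
	{
		if (e->guest_mode)
			printf("doorbell -> pCPU %u\n", (unsigned int)e->dest_apic);
	}

	int main(void)
	{
		struct toy_irte irte = { .guest_mode = 0, .dest_apic = 0 /* stale */ };

		/* Old flow: two separate IRTE updates. */
		irte.guest_mode = 1;	/* amd_iommu_activate_guest_mode() */
		toy_post_irq(&irte);	/* IRQ here => doorbell on stale pCPU 0 */
		irte.dest_apic = 3;	/* follow-up amd_iommu_update_ga() */

		/* New flow: one update publishes guest_mode + destination together. */
		struct toy_irte fixed = { .guest_mode = 1, .dest_apic = 3 };
		toy_post_irq(&fixed);	/* always the current pCPU */
		return 0;
	}

In the patch itself, the single update is the modify_irte_ga() at the end
of amd_iommu_activate_guest_mode(), which now carries the destination via
__amd_iommu_update_ga(); a cpu of -1 corresponds to a vCPU that is not
running, in which case the destination is fixed up when the vCPU is
awakened and/or scheduled in (see avic_vcpu_load(), per the comment above).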