@@ -360,7 +360,10 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
- return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
+ bool is_forwarded = (vgic_get_phys_irq(vcpu, irq) >= 0);
+
+ return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq) ||
+ (is_forwarded && vgic_dist_irq_is_pending(vcpu, irq));
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
@@ -1295,6 +1298,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_lr vlr;
int lr;
+ bool is_forwarded = (vgic_get_phys_irq(vcpu, irq) >= 0);
/* Sanitize the input... */
BUG_ON(sgi_source_id & ~7);
@@ -1330,7 +1334,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
vlr.irq = irq;
vlr.source = sgi_source_id;
vlr.state = LR_STATE_PENDING;
- if (!vgic_irq_is_edge(vcpu, irq))
+ if (!vgic_irq_is_edge(vcpu, irq) && !is_forwarded)
vlr.state |= LR_EOI_INT;
vgic_set_lr(vcpu, lr, vlr);
@@ -1371,11 +1375,12 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
+ bool is_forwarded = (vgic_get_phys_irq(vcpu, irq) >= 0);
if (!vgic_can_sample_irq(vcpu, irq))
return true; /* level interrupt, already queued */
if (vgic_queue_irq(vcpu, 0, irq)) {
- if (vgic_irq_is_edge(vcpu, irq)) {
+ if (vgic_irq_is_edge(vcpu, irq) || is_forwarded) {
vgic_dist_irq_clear_pending(vcpu, irq);
vgic_cpu_irq_clear(vcpu, irq);
} else {
@@ -1636,14 +1641,17 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
int edge_triggered, level_triggered;
int enabled;
bool ret = true;
+ bool is_forwarded;
spin_lock(&dist->lock);
vcpu = kvm_get_vcpu(kvm, cpuid);
+ is_forwarded = (vgic_get_phys_irq(vcpu, irq_num) >= 0);
+
edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
level_triggered = !edge_triggered;
- if (!vgic_validate_injection(vcpu, irq_num, level)) {
+ if (!vgic_validate_injection(vcpu, irq_num, level) && !is_forwarded) {
ret = false;
goto out;
}
Fix multiple injection of level-sensitive forwarded IRQs. With the current
code, the second injection fails since the state bitmaps are not reset
(process_maintenance is not called anymore).

The new implementation follows these principles:
- a forwarded IRQ can only be sampled when it is pending,
- when queueing the IRQ (programming the LR), the pending state is removed,
  as for edge-sensitive IRQs,
- an injection of a forwarded IRQ is always considered valid since it comes
  from the HW and the level is always 1.

Signed-off-by: Eric Auger <eric.auger@linaro.org>
---

v2 -> v3:
- integration in new vgic_can_sample_irq
- remove the pending state when programming the LR

v1 -> v2:
- fix vgic state bypass in vgic_queue_hwirq

Conflicts:
	virt/kvm/arm/vgic.c
---
 virt/kvm/arm/vgic.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)
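
For reference, here is a rough sketch of how the two helpers at the heart of
the state-machine change read once the hunks above are applied. The bodies are
reconstructed from the diff context plus the surrounding vgic.c of that era,
so the parts not visible in the hunks (the else branch and the return paths of
vgic_queue_hwirq) are an approximation rather than a verbatim copy:

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	bool is_forwarded = (vgic_get_phys_irq(vcpu, irq) >= 0);

	/*
	 * A forwarded IRQ can be (re)sampled as soon as it is pending,
	 * even while it is still flagged as queued, since
	 * process_maintenance no longer resets that state for it.
	 */
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq) ||
		(is_forwarded && vgic_dist_irq_is_pending(vcpu, irq));
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	bool is_forwarded = (vgic_get_phys_irq(vcpu, irq) >= 0);

	if (!vgic_can_sample_irq(vcpu, irq))
		return true;	/* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		/*
		 * Forwarded level-sensitive IRQs take the edge path:
		 * the pending state is dropped once the LR is programmed,
		 * so a later injection starts from a clean state.
		 */
		if (vgic_irq_is_edge(vcpu, irq) || is_forwarded) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}

The same reasoning is behind the vgic_queue_irq hunk: since process_maintenance
is not called for these IRQs anymore, LR_EOI_INT is not requested for a
forwarded IRQ, and the pending bits are cleared up front exactly as for
edge-triggered ones.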