--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -6,10 +6,12 @@
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/irq.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
 #include "vgic.h"
 
 /*
@@ -222,6 +224,16 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
         if (!irqchip_in_kernel(vcpu->kvm))
                 return 0;
 
+        if (vcpu_has_nv(vcpu)) {
+                /* Cope with vintage userspace. Maybe we should fail instead */
+                if (vcpu->kvm->arch.vgic.maint_irq == 0)
+                        vcpu->kvm->arch.vgic.maint_irq = kvm_vgic_global_state.maint_irq;
+                ret = kvm_vgic_set_owner(vcpu, vcpu->kvm->arch.vgic.maint_irq,
+                                         vcpu);
+                if (ret)
+                        return ret;
+        }
+
         /*
          * If we are creating a VCPU with a GICv3 we must also register the
          * KVM io device for the redistributor that belongs to this VCPU.
@@ -478,12 +490,23 @@ void kvm_vgic_cpu_down(void)
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 {
+        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)data;
+
         /*
          * We cannot rely on the vgic maintenance interrupt to be
          * delivered synchronously. This means we can only use it to
          * exit the VM, and we perform the handling of EOIed
          * interrupts on the exit path (see vgic_fold_lr_state).
          */
+
+        /* If not nested, deactivate */
+        if (!vcpu || !vgic_state_is_nested(vcpu)) {
+                irq_set_irqchip_state(irq, IRQCHIP_STATE_ACTIVE, false);
+                return IRQ_HANDLED;
+        }
+
+        /* Assume nested from now */
+        vgic_v3_handle_nested_maint_irq(vcpu);
         return IRQ_HANDLED;
 }
@@ -582,6 +605,16 @@ int kvm_vgic_hyp_init(void)
                 return ret;
         }
 
+        if (has_mask) {
+                ret = irq_set_vcpu_affinity(kvm_vgic_global_state.maint_irq,
+                                            kvm_get_running_vcpus());
+                if (ret) {
+                        kvm_err("Error setting vcpu affinity\n");
+                        free_percpu_irq(kvm_vgic_global_state.maint_irq, kvm_get_running_vcpus());
+                        return ret;
+                }
+        }
+
         kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
         return 0;
 }
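
Aside (illustrative, not part of the patch): vgic_maintenance_handler() can turn its 'data' argument into the running vCPU because kvm_vgic_hyp_init() already requests the maintenance interrupt as a percpu IRQ with kvm_get_running_vcpus() as dev_id, and the hunk above now hands that same per-CPU slot to the irqchip via irq_set_vcpu_affinity(). A minimal sketch of the wiring, with simplified error handling; the helper name is hypothetical:

/* Illustrative sketch only; example_wire_up_maint_irq() is hypothetical. */
static int example_wire_up_maint_irq(unsigned int maint_irq)
{
        int ret;

        /*
         * dev_id is the per-CPU "struct kvm_vcpu *" slot that vcpu_load()
         * points at the vCPU being entered, so the handler can dereference
         * it to find the vCPU running on this CPU (or NULL if there is none).
         */
        ret = request_percpu_irq(maint_irq, vgic_maintenance_handler,
                                 "vgic", kvm_get_running_vcpus());
        if (ret)
                return ret;

        /* Associate the same per-CPU slot with the IRQ at the irqchip level. */
        return irq_set_vcpu_affinity(maint_irq, kvm_get_running_vcpus());
}
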
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -175,10 +175,20 @@ void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
 void vgic_v3_load_nested(struct kvm_vcpu *vcpu)
 {
         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+        struct vgic_irq *irq;
+        unsigned long flags;
 
         vgic_cpu->shadow_vgic_v3 = vgic_cpu->nested_vgic_v3;
         vgic_v3_create_shadow_lr(vcpu);
         __vgic_v3_restore_state(vcpu_shadow_if(vcpu));
+
+        irq = vgic_get_irq(vcpu->kvm, vcpu, vcpu->kvm->arch.vgic.maint_irq);
+        raw_spin_lock_irqsave(&irq->irq_lock, flags);
+        if (irq->line_level || irq->active)
+                irq_set_irqchip_state(kvm_vgic_global_state.maint_irq,
+                                      IRQCHIP_STATE_ACTIVE, true);
+        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+        vgic_put_irq(vcpu->kvm, irq);
 }
 
 void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
@@ -193,20 +203,26 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
          */
         vgic_v3_fixup_shadow_lr_state(vcpu);
         vgic_cpu->nested_vgic_v3 = vgic_cpu->shadow_vgic_v3;
+        irq_set_irqchip_state(kvm_vgic_global_state.maint_irq,
+                              IRQCHIP_STATE_ACTIVE, false);
 }
 
 void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu)
 {
-        struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
-
         /*
          * If we exit a nested VM with a pending maintenance interrupt from the
          * GIC, then we need to forward this to the guest hypervisor so that it
          * can re-sync the appropriate LRs and sample level triggered interrupts
          * again.
          */
-        if (vgic_state_is_nested(vcpu) &&
-            (cpu_if->vgic_hcr & ICH_HCR_EN) &&
-            vgic_v3_get_misr(vcpu))
-                kvm_inject_nested_irq(vcpu);
+        if (vgic_state_is_nested(vcpu)) {
+                struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);
+                bool state;
+
+                state = cpu_if->vgic_hcr & ICH_HCR_EN;
+                state &= vgic_v3_get_misr(vcpu);
+
+                kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+                                    vcpu->kvm->arch.vgic.maint_irq, state, vcpu);
+        }
 }
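
Aside (illustrative, not part of the patch): the interrupt forwarded to the guest hypervisor is modelled as a level. Roughly, the line is high only while the L1 guest has enabled the virtual CPU interface (ICH_HCR_EL2.En) and the emulated maintenance interrupt status (MISR) is non-zero, so passing that level to kvm_vgic_inject_irq() on every relevant exit both raises and lowers the line as the shadow state changes. A sketch of the intended predicate, with a hypothetical helper name:

/* Illustrative sketch only; example_nested_maint_level() is hypothetical. */
static bool example_nested_maint_level(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = vcpu_nested_if(vcpu);

        /* High only while En is set and MISR reports a pending condition. */
        return (cpu_if->vgic_hcr & ICH_HCR_EN) && vgic_v3_get_misr(vcpu);
}
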
When we take a maintenance interrupt, we need to decide whether it is
generated on an action from the guest, or if it is something that needs
to be forwarded to the guest hypervisor.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/vgic/vgic-init.c      | 33 ++++++++++++++++++++++++++++
 arch/arm64/kvm/vgic/vgic-v3-nested.c | 28 ++++++++++++++++++-----
 2 files changed, 55 insertions(+), 6 deletions(-)