@@ -28,6 +28,11 @@ struct kvm_irqchip_flow {
int (*irqchip_vcpu_first_run)(struct kvm_vcpu *);
void (*irqchip_vcpu_flush_hwstate)(struct kvm_vcpu *);
void (*irqchip_vcpu_sync_hwstate)(struct kvm_vcpu *);
+ int (*irqchip_inject_irq)(struct kvm *, unsigned int cpu,
+ unsigned int intid, bool, void *);
+ int (*irqchip_inject_userspace_irq)(struct kvm *, unsigned int type,
+ unsigned int cpu,
+ unsigned int intid, bool);
};
/*
@@ -86,4 +91,10 @@ struct kvm_irqchip_flow {
#define kvm_irqchip_vcpu_sync_hwstate(v) \
__vcpu_irqchip_action((v), vcpu_sync_hwstate, (v))
+#define kvm_irqchip_inject_irq(k, ...) \
+ __kvm_irqchip_action_ret((k), inject_irq, (k), __VA_ARGS__)
+
+#define kvm_irqchip_inject_userspace_irq(k, ...) \
+ __kvm_irqchip_action_ret((k), inject_userspace_irq, (k), __VA_ARGS__)
+
#endif
@@ -388,10 +388,10 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
timer_ctx->irq.level);
if (!userspace_irqchip(vcpu->kvm)) {
- ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- timer_ctx->irq.irq,
- timer_ctx->irq.level,
- timer_ctx);
+ ret = kvm_irqchip_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer_ctx->irq.irq,
+ timer_ctx->irq.level,
+ timer_ctx);
WARN_ON(ret);
}
}
@@ -870,18 +870,14 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (!vcpu)
return -EINVAL;
- if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
- return -EINVAL;
-
- return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
+ return kvm_irqchip_inject_userspace_irq(kvm, irq_type, vcpu_idx,
+ irq_num, level);
case KVM_ARM_IRQ_TYPE_SPI:
if (!irqchip_in_kernel(kvm))
return -ENXIO;
- if (irq_num < VGIC_NR_PRIVATE_IRQS)
- return -EINVAL;
-
- return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
+ return kvm_irqchip_inject_userspace_irq(kvm, irq_type, 0,
+ irq_num, level);
}
return -EINVAL;
@@ -378,8 +378,8 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
pmu->irq_level = overflow;
if (likely(irqchip_in_kernel(vcpu->kvm))) {
- int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- pmu->irq_num, overflow, pmu);
+ int ret = kvm_irqchip_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ pmu->irq_num, overflow, pmu);
WARN_ON(ret);
}
}
@@ -27,6 +27,8 @@ static struct kvm_irqchip_flow vgic_irqchip_flow = {
.irqchip_vcpu_first_run = kvm_vgic_vcpu_first_run,
.irqchip_vcpu_flush_hwstate = kvm_vgic_flush_hwstate,
.irqchip_vcpu_sync_hwstate = kvm_vgic_sync_hwstate,
+ .irqchip_inject_irq = kvm_vgic_inject_irq,
+ .irqchip_inject_userspace_irq = kvm_vgic_inject_userspace_irq,
};
/*
@@ -434,7 +434,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
* level-sensitive interrupts. You can think of the level parameter as 1
* being HIGH and 0 being LOW and all devices being active-HIGH.
*/
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+int kvm_vgic_inject_irq(struct kvm *kvm, unsigned int cpuid, unsigned int intid,
bool level, void *owner)
{
struct kvm_vcpu *vcpu;
@@ -476,6 +476,24 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
return 0;
}
+int kvm_vgic_inject_userspace_irq(struct kvm *kvm, unsigned int type,
+ unsigned int cpuid, unsigned int intid,
+ bool level)
+{
+ switch (type) {
+ case KVM_ARM_IRQ_TYPE_PPI:
+ if (intid < VGIC_NR_SGIS || intid >= VGIC_NR_PRIVATE_IRQS)
+ return -EINVAL;
+ return kvm_vgic_inject_irq(kvm, cpuid, intid, level, NULL);
+ case KVM_ARM_IRQ_TYPE_SPI:
+ if (intid < VGIC_NR_PRIVATE_IRQS)
+ return -EINVAL;
+ return kvm_vgic_inject_irq(kvm, 0, intid, level, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
unsigned int host_irq,
@@ -232,6 +232,12 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+int kvm_vgic_inject_irq(struct kvm *kvm, unsigned int cpuid, unsigned int intid,
+ bool level, void *owner);
+int kvm_vgic_inject_userspace_irq(struct kvm *kvm, unsigned int type,
+ unsigned int cpuid, unsigned int intid,
+ bool level);
+
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
@@ -339,8 +339,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid, bool (*get_input_level)(int vindid));
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
As we continue abstracting away the VGIC, let's make a small change
while we're at it: let's offer two callbacks for "wired" interrupt
injection:

- Interrupts generated from the kernel itself
- Interrupts generated by userspace via the KVM_IRQ_LINE ioctl

The various checks are pushed into the vgic code. MSI injection,
such as the one used by userspace to tickle the ITS, is left alone
for now.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_irq.h | 11 +++++++++++
 arch/arm64/kvm/arch_timer.c      |  8 ++++----
 arch/arm64/kvm/arm.c             | 12 ++++--------
 arch/arm64/kvm/pmu-emul.c        |  4 ++--
 arch/arm64/kvm/vgic/vgic-init.c  |  2 ++
 arch/arm64/kvm/vgic/vgic.c       | 20 +++++++++++++++++++-
 arch/arm64/kvm/vgic/vgic.h       |  6 ++++++
 include/kvm/arm_vgic.h           |  2 --
 8 files changed, 48 insertions(+), 17 deletions(-)