@@ -1217,6 +1217,12 @@ static int __init gicv2_init(void)
return 0;
}
+/*
+ * do_LPI hook for GICv2: LPIs only exist on GICv3 hardware with an ITS,
+ * so a GICv2 host can never legitimately receive one. Reaching this
+ * function therefore indicates a bug in the interrupt entry path.
+ */
+static void gicv2_do_LPI(unsigned int lpi)
+{
+ /* No LPIs in a GICv2 */
+ BUG();
+}
+
const static struct gic_hw_operations gicv2_ops = {
.info = &gicv2_info,
.init = gicv2_init,
@@ -1244,6 +1250,7 @@ const static struct gic_hw_operations gicv2_ops = {
.make_hwdom_madt = gicv2_make_hwdom_madt,
.map_hwdom_extra_mappings = gicv2_map_hwdown_extra_mappings,
.iomem_deny_access = gicv2_iomem_deny_access,
+ .do_LPI = gicv2_do_LPI,
};
/* Set up the GIC */
@@ -136,6 +136,85 @@ uint64_t gicv3_get_redist_address(unsigned int cpu, bool use_pta)
return per_cpu(lpi_redist, cpu).redist_id << 16;
}
+/*
+ * Inject the virtual LPI "virq" into domain "d", delivering it to the
+ * vCPU the guest has configured as the target for this LPI.
+ * Silently drops the interrupt if no pending_irq exists for it or if
+ * no valid target vCPU has been set up (LPIs are edge triggered, so
+ * dropping is safe).
+ */
+void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq)
+{
+ /*
+ * TODO: this assumes that the struct pending_irq stays valid all of
+ * the time. We cannot properly protect this with the current locking
+ * scheme, but the future per-IRQ lock will solve this problem.
+ */
+ struct pending_irq *p = irq_to_pending(d->vcpu[0], virq);
+ unsigned int vcpu_id;
+
+ if ( !p )
+ return;
+
+ /*
+ * Read the target once: lpi_vcpu_id may be updated concurrently by
+ * the guest remapping the LPI. An out-of-range value means this LPI
+ * has no (valid) target yet, so just drop it.
+ */
+ vcpu_id = ACCESS_ONCE(p->lpi_vcpu_id);
+ if ( vcpu_id >= d->max_vcpus )
+ return;
+
+ vgic_vcpu_inject_irq(d->vcpu[vcpu_id], virq);
+}
+
+/*
+ * Handle incoming LPIs, which are a bit special, because they are potentially
+ * numerous and also only get injected into guests. Treat them specially here,
+ * by just looking up their target vCPU and virtual LPI number and hand it
+ * over to the injection function.
+ * Please note that LPIs are edge-triggered only, also have no active state,
+ * so spurious interrupts on the host side are no issue (we can just ignore
+ * them).
+ * Also a guest cannot expect that firing interrupts that haven't been
+ * fully configured yet will reach the CPU, so we don't need to care about
+ * this special case.
+ */
+void gicv3_do_LPI(unsigned int lpi)
+{
+ struct domain *d;
+ union host_lpi *hlpip, hlpi;
+
+ irq_enter();
+
+ /*
+ * EOI the LPI already. Safe to do before injection because LPIs have
+ * no active state to maintain, and we never return to the host's
+ * interrupt handling path for this IRQ.
+ */
+ WRITE_SYSREG32(lpi, ICC_EOIR1_EL1);
+
+ /* Find out if a guest mapped something to this physical LPI. */
+ hlpip = gic_get_host_lpi(lpi);
+ if ( !hlpip )
+ goto out;
+
+ /* Snapshot dom_id and virt_lpi together, atomically. */
+ hlpi.data = read_u64_atomic(&hlpip->data);
+
+ /*
+ * Unmapped events are marked with an invalid LPI ID. We can safely
+ * ignore them, as they have no further state and no-one can expect
+ * to see them if they have not been mapped.
+ */
+ if ( hlpi.virt_lpi == INVALID_LPI )
+ goto out;
+
+ /* The owning domain may have died meanwhile; drop the LPI then. */
+ d = rcu_lock_domain_by_id(hlpi.dom_id);
+ if ( !d )
+ goto out;
+
+ /*
+ * TODO: Investigate what to do here for potential interrupt storms.
+ * As we keep all host LPIs enabled, for disabling LPIs we would need
+ * to queue a ITS host command, which we avoid so far during a guest's
+ * runtime. Also re-enabling would trigger a host command upon the
+ * guest sending a command, which could be an attack vector for
+ * hogging the host command queue.
+ * See the thread around here for some background:
+ * https://lists.xen.org/archives/html/xen-devel/2016-12/msg00003.html
+ */
+ vgic_vcpu_inject_lpi(d, hlpi.virt_lpi);
+
+ rcu_unlock_domain(d);
+
+out:
+ irq_exit();
+}
+
static int gicv3_lpi_allocate_pendtable(uint64_t *reg)
{
uint64_t val;
@@ -1692,6 +1692,7 @@ static const struct gic_hw_operations gicv3_ops = {
.make_hwdom_dt_node = gicv3_make_hwdom_dt_node,
.make_hwdom_madt = gicv3_make_hwdom_madt,
.iomem_deny_access = gicv3_iomem_deny_access,
+ .do_LPI = gicv3_do_LPI,
};
static int __init gicv3_dt_preinit(struct dt_device_node *node, const void *data)
@@ -732,7 +732,13 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
do_IRQ(regs, irq, is_fiq);
local_irq_disable();
}
- else if (unlikely(irq < 16))
+ else if ( is_lpi(irq) )
+ {
+ local_irq_enable();
+ gic_hw_ops->do_LPI(irq);
+ local_irq_disable();
+ }
+ else if ( unlikely(irq < 16) )
{
do_sgi(regs, irq);
}
@@ -260,7 +260,8 @@ struct arch_vcpu
/* GICv3: redistributor base and flags for this vCPU */
paddr_t rdist_base;
-#define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */
+#define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */
+#define VGIC_V3_LPIS_ENABLED (1 << 1)
uint8_t flags;
} vgic;
@@ -367,6 +367,8 @@ struct gic_hw_operations {
int (*map_hwdom_extra_mappings)(struct domain *d);
/* Deny access to GIC regions */
int (*iomem_deny_access)(const struct domain *d);
+ /* Handle LPIs, which require special handling */
+ void (*do_LPI)(unsigned int lpi);
};
void register_gic_ops(const struct gic_hw_operations *ops);
@@ -134,6 +134,8 @@ void gicv3_its_dt_init(const struct dt_device_node *node);
bool gicv3_its_host_has_its(void);
+void gicv3_do_LPI(unsigned int lpi);
+
int gicv3_lpi_init_rdist(void __iomem * rdist_base);
/* Initialize the host structures for LPIs and the host ITSes. */
@@ -164,6 +166,8 @@ int gicv3_its_map_guest_device(struct domain *d,
int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi);
void gicv3_free_host_lpi_block(uint32_t first_lpi);
+void vgic_vcpu_inject_lpi(struct domain *d, unsigned int virq);
+
#else
static inline void gicv3_its_dt_init(const struct dt_device_node *node)
@@ -175,6 +179,12 @@ static inline bool gicv3_its_host_has_its(void)
return false;
}
+/*
+ * Stub for builds without ITS support: LPIs are never enabled on the
+ * host in that configuration, so receiving one is a bug.
+ */
+static inline void gicv3_do_LPI(unsigned int lpi)
+{
+ /* We don't enable LPIs without an ITS. */
+ BUG();
+}
+
static inline int gicv3_lpi_init_rdist(void __iomem * rdist_base)
{
return -ENODEV;