--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -712,11 +712,18 @@ static struct pending_irq *vgic_v2_lpi_to_pending(struct domain *d,
BUG();
}
+static int vgic_v2_lpi_get_priority(struct domain *d, uint32_t vlpi)
+{
+ /* Dummy function, no LPIs on a VGICv2. */
+ BUG();
+}
+
static const struct vgic_ops vgic_v2_ops = {
.vcpu_init = vgic_v2_vcpu_init,
.domain_init = vgic_v2_domain_init,
.domain_free = vgic_v2_domain_free,
.lpi_to_pending = vgic_v2_lpi_to_pending,
+ .lpi_get_priority = vgic_v2_lpi_get_priority,
.max_vcpus = 8,
};
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -1577,12 +1577,23 @@ static struct pending_irq *vgic_v3_lpi_to_pending(struct domain *d,
return pirq;
}
+/* Retrieve the priority of an LPI from its struct pending_irq. */
+static int vgic_v3_lpi_get_priority(struct domain *d, uint32_t vlpi)
+{
+ struct pending_irq *p = vgic_v3_lpi_to_pending(d, vlpi);
+
+ ASSERT(p);
+
+ return p->lpi_priority;
+}
+
static const struct vgic_ops v3_ops = {
.vcpu_init = vgic_v3_vcpu_init,
.domain_init = vgic_v3_domain_init,
.domain_free = vgic_v3_domain_free,
.emulate_reg = vgic_v3_emulate_reg,
.lpi_to_pending = vgic_v3_lpi_to_pending,
+ .lpi_get_priority = vgic_v3_lpi_get_priority,
/*
* We use both AFF1 and AFF0 in (v)MPIDR. Thus, the max number of CPU
* that can be supported is up to 4096(==256*16) in theory.
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -228,8 +228,13 @@ struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
{
- struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
+ struct vgic_irq_rank *rank;
+
+ /* LPIs don't have a rank; their priority is stored separately. */
+ if ( is_lpi(virq) )
+ return v->domain->arch.vgic.handler->lpi_get_priority(v->domain, virq);
+ rank = vgic_rank_irq(v, virq);
return ACCESS_ONCE(rank->priority[virq & INTERRUPT_RANK_MASK]);
}
@@ -504,8 +509,6 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
unsigned long flags;
bool running;
- priority = vgic_get_virq_priority(v, virq);
-
spin_lock_irqsave(&v->arch.vgic.lock, flags);
n = irq_to_pending(v, virq);
@@ -531,6 +534,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
goto out;
}
+ priority = vgic_get_virq_priority(v, virq);
n->priority = priority;
/* the irq is enabled */
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -72,6 +72,7 @@ struct pending_irq
#define GIC_INVALID_LR (uint8_t)~0
uint8_t lr;
uint8_t priority;
+ uint8_t lpi_priority; /* Caches the priority if this is an LPI. */
/* inflight is used to append instances of pending_irq to
* vgic.inflight_irqs */
struct list_head inflight;
@@ -136,6 +137,7 @@ struct vgic_ops {
bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
/* lookup the struct pending_irq for a given LPI interrupt */
struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
+ int (*lpi_get_priority)(struct domain *d, uint32_t vlpi);
/* Maximum number of vCPU supported */
const unsigned int max_vcpus;
};
We enhance struct pending_irq to cache the priority information for LPIs.
Reading the information from there is faster than accessing the property
table in guest memory. Also it uses some padding area in the struct, so it
does not require more memory.

This introduces a function to retrieve the LPI priority as a new vgic_ops
member. Also this moves the vgic_get_virq_priority() call in
vgic_vcpu_inject_irq() to happen after the NULL check of the pending_irq
pointer, so we can rely on that pointer in the new function.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 xen/arch/arm/vgic-v2.c     |  7 +++++++
 xen/arch/arm/vgic-v3.c     | 11 +++++++++++
 xen/arch/arm/vgic.c        | 10 +++++++---
 xen/include/asm-arm/vgic.h |  2 ++
 4 files changed, 27 insertions(+), 3 deletions(-)
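
For readers outside the Xen tree, here is a minimal, self-contained sketch of
the caching idea: the priority of an LPI lives in the upper bits of its
property-table byte, and stashing that value in a per-interrupt field means
later priority lookups never have to touch guest memory. The struct layout,
helper names and the LPI_PROP_PRIO_MASK value below are illustrative
assumptions, not code taken from this patch.

/* Standalone sketch of the LPI priority cache; the struct layout, helper
 * names and mask value are assumptions for illustration, not patch code. */
#include <stdint.h>
#include <stdio.h>

/* Assumption: the LPI property byte keeps the priority in bits [7:2]. */
#define LPI_PROP_PRIO_MASK  0xfc

struct pending_irq_sketch {
    uint8_t priority;      /* priority used while the IRQ is in flight */
    uint8_t lpi_priority;  /* cached copy of the LPI property priority */
};

/* Fill the cache once, when the property table entry is read from guest
 * memory (in Xen this would happen in the ITS emulation, not here). */
static void cache_lpi_priority(struct pending_irq_sketch *p, uint8_t prop)
{
    p->lpi_priority = prop & LPI_PROP_PRIO_MASK;
}

/* Later lookups, e.g. on interrupt injection, hit the cache instead of
 * re-reading the property table. */
static uint8_t get_lpi_priority(const struct pending_irq_sketch *p)
{
    return p->lpi_priority;
}

int main(void)
{
    struct pending_irq_sketch p = { 0, 0 };

    cache_lpi_priority(&p, 0xa1);   /* property byte as read from the table */
    printf("cached LPI priority: %#x\n", get_lpi_priority(&p));

    return 0;
}

The patch follows the same pattern: vgic_v3_lpi_get_priority() simply returns
the cached lpi_priority field of the pending_irq that vgic_vcpu_inject_irq()
has already looked up, which is why the vgic_get_virq_priority() call could
move below the NULL check.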