
[v10,28/32] ARM: vITS: handle INV command

Message ID 20170526173540.10066-29-andre.przywara@arm.com (mailing list archive)
State New, archived

Commit Message

Andre Przywara May 26, 2017, 5:35 p.m. UTC
The INV command instructs the ITS to update the configuration data for
a given LPI by re-reading its entry from the property table.
We don't need to care much about the priority value, but enabling or
disabling an LPI has some effect: we remove virtual LPIs from or push
them to their VCPUs, and also check the virtual pending bit when an
LPI gets enabled.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 xen/arch/arm/vgic-v3-its.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)
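
For reference, the INV command emulated here is a standard 32-byte GICv3
ITS command that carries only a DeviceID and an EventID. Below is a
minimal sketch of how a guest driver might encode it, assuming the
architectural command layout (INV is command number 0x0c); the struct
and helper are illustrative only, not Xen or Linux code:

    #include <stdint.h>
    #include <string.h>

    /* One ITS command slot: four 64-bit doublewords (32 bytes). */
    struct its_cmd_block {
        uint64_t raw[4];
    };

    #define ITS_CMD_INV_NR  0x0cULL  /* architectural INV command number */

    /*
     * Encode an INV for a (DeviceID, EventID) pair:
     *   DW0[7:0]   = command number
     *   DW0[63:32] = DeviceID
     *   DW1[31:0]  = EventID
     *   DW2/DW3    = reserved, written as zero
     */
    static void its_encode_inv(struct its_cmd_block *cmd,
                               uint32_t devid, uint32_t eventid)
    {
        memset(cmd, 0, sizeof(*cmd));
        cmd->raw[0] = ITS_CMD_INV_NR | ((uint64_t)devid << 32);
        cmd->raw[1] = eventid;
    }

On the emulation side, its_handle_inv() in the patch below extracts
exactly these two fields via its_cmd_get_deviceid() and its_cmd_get_id().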

Comments

Stefano Stabellini May 30, 2017, 10:23 p.m. UTC | #1
On Fri, 26 May 2017, Andre Przywara wrote:
> The INV command instructs the ITS to update the configuration data for
> a given LPI by re-reading its entry from the property table.
> We don't need to care much about the priority value, but enabling or
> disabling an LPI has some effect: we remove virtual LPIs from or push
> them to their VCPUs, and also check the virtual pending bit when an
> LPI gets enabled.
> 
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


Patch

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 773e5fb..14fde83 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -449,6 +449,73 @@  static int update_lpi_property(struct domain *d, struct pending_irq *p)
     return 0;
 }
 
+/*
+ * Checks whether an LPI that got enabled or disabled needs to change
+ * something in the VGIC (added or removed from the LR or queues).
+ * We don't disable the underlying physical LPI, because this requires
+ * queueing a host LPI command, which we can't afford to do on behalf
+ * of a guest.
+ * Must be called with the VCPU VGIC lock held.
+ */
+static void update_lpi_vgic_status(struct vcpu *v, struct pending_irq *p)
+{
+    ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
+    {
+        if ( !list_empty(&p->inflight) &&
+             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+            gic_raise_guest_irq(v, p->irq, p->lpi_priority);
+    }
+    else
+        gic_remove_from_lr_pending(v, p);
+}
+
+static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
+{
+    struct domain *d = its->d;
+    uint32_t devid = its_cmd_get_deviceid(cmdptr);
+    uint32_t eventid = its_cmd_get_id(cmdptr);
+    struct pending_irq *p;
+    unsigned long flags;
+    struct vcpu *vcpu;
+    uint32_t vlpi;
+    int ret = -1;
+
+    spin_lock(&its->its_lock);
+
+    /* Translate the event into a vCPU/vLPI pair. */
+    if ( !read_itte_locked(its, devid, eventid, &vcpu, &vlpi) )
+        goto out_unlock_its;
+
+    if ( vlpi == INVALID_LPI )
+        goto out_unlock_its;
+
+    p = gicv3_its_get_event_pending_irq(d, its->doorbell_address,
+                                        devid, eventid);
+    if ( unlikely(!p) )
+        goto out_unlock_its;
+
+    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
+
+    /* Read the property table and update our cached status. */
+    if ( update_lpi_property(d, p) )
+        goto out_unlock;
+
+    /* Check whether the LPI needs to go on a VCPU. */
+    update_lpi_vgic_status(vcpu, p);
+
+    ret = 0;
+
+out_unlock:
+    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+
+out_unlock_its:
+    spin_unlock(&its->its_lock);
+
+    return ret;
+}
+
 /* Must be called with the ITS lock held. */
 static int its_discard_event(struct virt_its *its,
                              uint32_t vdevid, uint32_t vevid)
@@ -788,6 +855,9 @@  static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
         case GITS_CMD_INT:
             ret = its_handle_int(its, command);
             break;
+        case GITS_CMD_INV:
+            ret = its_handle_inv(its, command);
+            break;
         case GITS_CMD_MAPC:
             ret = its_handle_mapc(its, command);
             break;
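
As a footnote on what update_lpi_property() re-reads: each LPI has a
single byte in the guest's property (configuration) table, with the
enable bit in bit 0 and the priority in bits [7:2]. A sketch of that
decoding, assuming the architectural GICv3 layout; the macros and
helpers below are illustrative, not the hypervisor's actual definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Architectural layout of one LPI property-table byte. */
    #define LPI_PROP_ENABLE     (1U << 0)  /* bit 0: LPI enabled */
    #define LPI_PROP_PRIO_MASK  0xfcU      /* bits [7:2]: priority */

    /* True if the property byte marks the LPI as enabled. */
    static bool lpi_prop_enabled(uint8_t prop)
    {
        return prop & LPI_PROP_ENABLE;
    }

    /* Priority encoded in the property byte (low two bits read as zero). */
    static uint8_t lpi_prop_priority(uint8_t prop)
    {
        return prop & LPI_PROP_PRIO_MASK;
    }

It is this byte that INV forces the virtual ITS to re-read:
update_lpi_property() refreshes the cached enable state and
p->lpi_priority, and update_lpi_vgic_status() then raises the virtual
LPI or removes it from the pending list accordingly.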