diff mbox

[v8,23/27] ARM: vITS: handle INV command

Message ID 1491957874-31600-24-git-send-email-andre.przywara@arm.com (mailing list archive)
State New, archived
Headers show

Commit Message

Andre Przywara April 12, 2017, 12:44 a.m. UTC
The INV command instructs the ITS to update the configuration data for
a given LPI by re-reading its entry from the property table.
We don't need to care so much about the priority value, but enabling
or disabling an LPI has some effect: We remove or push virtual LPIs
to their VCPUs, also check the virtual pending bit if an LPI gets enabled.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 xen/arch/arm/vgic-v3-its.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)

Comments

Julien Grall April 12, 2017, 5:20 p.m. UTC | #1
Hi Andre,

On 12/04/17 01:44, Andre Przywara wrote:
> The INV command instructs the ITS to update the configuration data for
> a given LPI by re-reading its entry from the property table.
> We don't need to care so much about the priority value, but enabling
> or disabling an LPI has some effect: We remove or push virtual LPIs
> to their VCPUs, also check the virtual pending bit if an LPI gets enabled.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  xen/arch/arm/vgic-v3-its.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 65 insertions(+)
>
> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
> index 09cb3af..f2789c5 100644
> --- a/xen/arch/arm/vgic-v3-its.c
> +++ b/xen/arch/arm/vgic-v3-its.c
> @@ -418,6 +418,68 @@ static int update_lpi_property(struct domain *d, uint32_t vlpi,
>      return 0;
>  }
>
> +/*
> + * Checks whether an LPI that got enabled or disabled needs to change
> + * something in the VGIC (added or removed from the LR or queues).
> + * Must be called with the VCPU VGIC lock held.
> + */
> +static void update_lpi_vgic_status(struct vcpu *v, struct pending_irq *p,
> +                                   uint32_t vlpi)

p->irq should be equal to vlpi. No?

> +{
> +    ASSERT(spin_is_locked(&v->arch.vgic.lock));

The locking is likely to be wrong here too (see patch #2). For instance 
with a MOVI then INV on interrupt enabled.

> +
> +    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
> +    {
> +        if ( !list_empty(&p->inflight) &&
> +             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
> +            gic_raise_guest_irq(v, vlpi, p->lpi_priority);
> +    }
> +    else
> +    {
> +        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
> +        list_del_init(&p->lr_queue);
> +    }
> +}
> +
> +static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
> +{
> +    struct domain *d = its->d;
> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
> +    uint32_t eventid = its_cmd_get_id(cmdptr);
> +    struct pending_irq *p;
> +    unsigned long flags;
> +    struct vcpu *vcpu;
> +    uint32_t vlpi;
> +    int ret = -1;
> +
> +    /* Translate the event into a vCPU/vLPI pair. */
> +    if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
> +        return -1;
> +
> +    if ( vlpi == INVALID_LPI )
> +        return -1;
> +
> +    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
> +
> +    p = d->arch.vgic.handler->lpi_to_pending(d, vlpi);
> +    if ( !p )
> +        goto out_unlock;

As said on v5, this could be simpler and use the pending_irqs in the 
device. That would be an improvement though. So a follow-up would be good.

> +
> +    /* Read the property table and update our cached status. */
> +    if ( update_lpi_property(d, vlpi, p) )
> +        goto out_unlock;
> +
> +    /* Check whether the LPI needs to go on a VCPU. */
> +    update_lpi_vgic_status(vcpu, p, vlpi);
> +
> +    ret = 0;
> +
> +out_unlock:
> +    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
> +
> +    return ret;
> +}
> +
>  static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
>  {
>      uint32_t collid = its_cmd_get_collection(cmdptr);
> @@ -757,6 +819,9 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
>          case GITS_CMD_INT:
>              ret = its_handle_int(its, command);
>              break;
> +        case GITS_CMD_INV:
> +            ret = its_handle_inv(its, command);
> +            break;
>          case GITS_CMD_MAPC:
>              ret = its_handle_mapc(its, command);
>              break;
>

Cheers,
Andre Przywara May 10, 2017, 3:11 p.m. UTC | #2
Hi,

On 12/04/17 18:20, Julien Grall wrote:
> Hi Andre,
> 
> On 12/04/17 01:44, Andre Przywara wrote:
>> The INV command instructs the ITS to update the configuration data for
>> a given LPI by re-reading its entry from the property table.
>> We don't need to care so much about the priority value, but enabling
>> or disabling an LPI has some effect: We remove or push virtual LPIs
>> to their VCPUs, also check the virtual pending bit if an LPI gets
>> enabled.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>> ---
>>  xen/arch/arm/vgic-v3-its.c | 65
>> ++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 65 insertions(+)
>>
>> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
>> index 09cb3af..f2789c5 100644
>> --- a/xen/arch/arm/vgic-v3-its.c
>> +++ b/xen/arch/arm/vgic-v3-its.c
>> @@ -418,6 +418,68 @@ static int update_lpi_property(struct domain *d,
>> uint32_t vlpi,
>>      return 0;
>>  }
>>
>> +/*
>> + * Checks whether an LPI that got enabled or disabled needs to change
>> + * something in the VGIC (added or removed from the LR or queues).
>> + * Must be called with the VCPU VGIC lock held.
>> + */
>> +static void update_lpi_vgic_status(struct vcpu *v, struct pending_irq
>> *p,
>> +                                   uint32_t vlpi)
> 
> p->irq should be equal to vlpi. No?

It is, but I liked the idea of having logically separate parameters
expressed as such. But I removed vlpi now and am using p->irq instead.

> 
>> +{
>> +    ASSERT(spin_is_locked(&v->arch.vgic.lock));
> 
> The locking is likely to be wrong here too (see patch #2). For instance
> with a MOVI then INV on interrupt enabled.
> 
>> +
>> +    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
>> +    {
>> +        if ( !list_empty(&p->inflight) &&
>> +             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
>> +            gic_raise_guest_irq(v, vlpi, p->lpi_priority);
>> +    }
>> +    else
>> +    {
>> +        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
>> +        list_del_init(&p->lr_queue);
>> +    }
>> +}
>> +
>> +static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
>> +{
>> +    struct domain *d = its->d;
>> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
>> +    uint32_t eventid = its_cmd_get_id(cmdptr);
>> +    struct pending_irq *p;
>> +    unsigned long flags;
>> +    struct vcpu *vcpu;
>> +    uint32_t vlpi;
>> +    int ret = -1;
>> +
>> +    /* Translate the event into a vCPU/vLPI pair. */
>> +    if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
>> +        return -1;
>> +
>> +    if ( vlpi == INVALID_LPI )
>> +        return -1;
>> +
>> +    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
>> +
>> +    p = d->arch.vgic.handler->lpi_to_pending(d, vlpi);
>> +    if ( !p )
>> +        goto out_unlock;
> 
> As said on v5, this could be simpler and use the pending_irqs in the
> device. That would be an improvement though. So a follow-up would be good.

Originally I found it more straight-forward to use the one existing
interface (the rbtree) we also use in the VGIC part, which would allow
us to handle locking or ref-counting in one central place.
But indeed the ITS command handling has all the data we need to find the
pending_irq directly from the virtual device.
So I replaced all lpi_to_pending() calls in those handlers with a new
function gicv3_its_get_event_pending_irq(), which looks up the struct
from an ITS/device/event triple.
I take and keep the its->lock for the runtime of these functions, so
those events and their memory will not vanish meanwhile.

Does that make sense?

Cheers,
Andre.

>> +
>> +    /* Read the property table and update our cached status. */
>> +    if ( update_lpi_property(d, vlpi, p) )
>> +        goto out_unlock;
>> +
>> +    /* Check whether the LPI needs to go on a VCPU. */
>> +    update_lpi_vgic_status(vcpu, p, vlpi);
>> +
>> +    ret = 0;
>> +
>> +out_unlock:
>> +    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
>> +
>> +    return ret;
>> +}
>> +
>>  static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
>>  {
>>      uint32_t collid = its_cmd_get_collection(cmdptr);
>> @@ -757,6 +819,9 @@ static int vgic_its_handle_cmds(struct domain *d,
>> struct virt_its *its)
>>          case GITS_CMD_INT:
>>              ret = its_handle_int(its, command);
>>              break;
>> +        case GITS_CMD_INV:
>> +            ret = its_handle_inv(its, command);
>> +            break;
>>          case GITS_CMD_MAPC:
>>              ret = its_handle_mapc(its, command);
>>              break;
>>
> 
> Cheers,
>
Julien Grall May 11, 2017, 10:43 a.m. UTC | #3
On 10/05/17 16:11, Andre Przywara wrote:
> Hi,

Hi Andre,

> On 12/04/17 18:20, Julien Grall wrote:
>> On 12/04/17 01:44, Andre Przywara wrote:
>>
>>> +{
>>> +    ASSERT(spin_is_locked(&v->arch.vgic.lock));
>>
>> The locking is likely to be wrong here too (see patch #2). For instance
>> with a MOVI then INV on interrupt enabled.
>>
>>> +
>>> +    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
>>> +    {
>>> +        if ( !list_empty(&p->inflight) &&
>>> +             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
>>> +            gic_raise_guest_irq(v, vlpi, p->lpi_priority);
>>> +    }
>>> +    else
>>> +    {
>>> +        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
>>> +        list_del_init(&p->lr_queue);
>>> +    }
>>> +}
>>> +
>>> +static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
>>> +{
>>> +    struct domain *d = its->d;
>>> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
>>> +    uint32_t eventid = its_cmd_get_id(cmdptr);
>>> +    struct pending_irq *p;
>>> +    unsigned long flags;
>>> +    struct vcpu *vcpu;
>>> +    uint32_t vlpi;
>>> +    int ret = -1;
>>> +
>>> +    /* Translate the event into a vCPU/vLPI pair. */
>>> +    if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
>>> +        return -1;
>>> +
>>> +    if ( vlpi == INVALID_LPI )
>>> +        return -1;
>>> +
>>> +    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
>>> +
>>> +    p = d->arch.vgic.handler->lpi_to_pending(d, vlpi);
>>> +    if ( !p )
>>> +        goto out_unlock;
>>
>> As said on v5, this could be simpler and use the pending_irqs in the
>> device. That would be an improvement though. So a follow-up would be good.
>
> Originally I found it more straight-forward to use the one existing
> interface (the rbtree) we also use in the VGIC part, which would allow
> us to handle locking or ref-counting in one central place.
> But indeed the ITS command handling has all the data we need to find the
> pending_irq directly from the virtual device.
> So I replaced all lpi_to_pending() calls in those handlers with a new
> function gicv3_its_get_event_pending_irq(), which looks up the struct
> from an ITS/device/event triple.
> I take and keep the its->lock for the runtime of these functions, so
> those events and their memory will not vanish meanwhile.
>
> Does that make sense?

It makes sense to keep the ref-counting in one central place. But it is 
better to avoid reading guest memory and therefore avoid most of the 
checking and overhead needed to translate the IPA to PA.

That's why I suggested to use pending_irqs :).

Cheers,
diff mbox

Patch

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 09cb3af..f2789c5 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -418,6 +418,68 @@  static int update_lpi_property(struct domain *d, uint32_t vlpi,
     return 0;
 }
 
+/*
+ * Checks whether an LPI that got enabled or disabled needs to change
+ * something in the VGIC (added or removed from the LR or queues).
+ * Must be called with the VCPU VGIC lock held.
+ */
+static void update_lpi_vgic_status(struct vcpu *v, struct pending_irq *p,
+                                   uint32_t vlpi)
+{
+    ASSERT(spin_is_locked(&v->arch.vgic.lock));
+
+    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
+    {
+        if ( !list_empty(&p->inflight) &&
+             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+            gic_raise_guest_irq(v, vlpi, p->lpi_priority);
+    }
+    else
+    {
+        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+        list_del_init(&p->lr_queue);
+    }
+}
+
+static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
+{
+    struct domain *d = its->d;
+    uint32_t devid = its_cmd_get_deviceid(cmdptr);
+    uint32_t eventid = its_cmd_get_id(cmdptr);
+    struct pending_irq *p;
+    unsigned long flags;
+    struct vcpu *vcpu;
+    uint32_t vlpi;
+    int ret = -1;
+
+    /* Translate the event into a vCPU/vLPI pair. */
+    if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
+        return -1;
+
+    if ( vlpi == INVALID_LPI )
+        return -1;
+
+    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
+
+    p = d->arch.vgic.handler->lpi_to_pending(d, vlpi);
+    if ( !p )
+        goto out_unlock;
+
+    /* Read the property table and update our cached status. */
+    if ( update_lpi_property(d, vlpi, p) )
+        goto out_unlock;
+
+    /* Check whether the LPI needs to go on a VCPU. */
+    update_lpi_vgic_status(vcpu, p, vlpi);
+
+    ret = 0;
+
+out_unlock:
+    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+
+    return ret;
+}
+
 static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
 {
     uint32_t collid = its_cmd_get_collection(cmdptr);
@@ -757,6 +819,9 @@  static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
         case GITS_CMD_INT:
             ret = its_handle_int(its, command);
             break;
+        case GITS_CMD_INV:
+            ret = its_handle_inv(its, command);
+            break;
         case GITS_CMD_MAPC:
             ret = its_handle_mapc(its, command);
             break;