[v9,21/28] ARM: vITS: handle MAPTI command

Message ID 20170511175340.8448-22-andre.przywara@arm.com (mailing list archive)
State New, archived

Commit Message

Andre Przywara May 11, 2017, 5:53 p.m. UTC
The MAPTI command associates a DeviceID/EventID pair with an LPI/CPU
pair and actually instantiates LPI interrupts.
We connect the already allocated host LPI to this virtual LPI, so that
any LPI triggering on the host can be quickly forwarded to a guest.
Besides entering the VCPU and the virtual LPI number in the respective
host LPI entry, we also initialize and add the already allocated
struct pending_irq to our radix tree, so that we can now easily find it
by its virtual LPI number.
We also read the property table to update the enabled bit and the
priority for our new LPI, as we might have missed this during an earlier
INVALL call (which only checks mapped LPIs).
Since write_itte_locked() now sees its first usage, we change the
declaration to static.
---
 xen/arch/arm/gic-v3-its.c        |  28 +++++++++
 xen/arch/arm/vgic-v3-its.c       | 124 ++++++++++++++++++++++++++++++++++++++-
 xen/include/asm-arm/gic_v3_its.h |   3 +
 3 files changed, 152 insertions(+), 3 deletions(-)

Comments

Julien Grall May 18, 2017, 2:04 p.m. UTC | #1
Hi Andre,

On 11/05/17 18:53, Andre Przywara wrote:
> The MAPTI command associates a DeviceID/EventID pair with an LPI/CPU
> pair and actually instantiates LPI interrupts.
> We connect the already allocated host LPI to this virtual LPI, so that
> any LPI triggering on the host can be quickly forwarded to a guest.
> Besides entering the VCPU and the virtual LPI number in the respective
> host LPI entry, we also initialize and add the already allocated
> struct pending_irq to our radix tree, so that we can now easily find it
> by its virtual LPI number.
> We also read the property table to update the enabled bit and the
> priority for our new LPI, as we might have missed this during an earlier
> INVALL call (which only checks mapped LPIs).

This patch is doing more than implementing MAPTI. It also implements 
MAPI, and this should be mentioned in the commit message/title.

> Since write_itte_locked() now sees its first usage, we change the
> declaration to static.

Your signed-off-by is missing here.

> ---
>  xen/arch/arm/gic-v3-its.c        |  28 +++++++++
>  xen/arch/arm/vgic-v3-its.c       | 124 ++++++++++++++++++++++++++++++++++++++-
>  xen/include/asm-arm/gic_v3_its.h |   3 +
>  3 files changed, 152 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
> index be4c3e0..8a50f7d 100644
> --- a/xen/arch/arm/gic-v3-its.c
> +++ b/xen/arch/arm/gic-v3-its.c
> @@ -887,6 +887,34 @@ int gicv3_remove_guest_event(struct domain *d, paddr_t vdoorbell_address,
>      return 0;
>  }
>
> +/*
> + * Connects the event ID for an already assigned device to the given VCPU/vLPI
> + * pair. The corresponding physical LPI is already mapped on the host side
> + * (when assigning the physical device to the guest), so we just connect the
> + * target VCPU/vLPI pair to that interrupt to inject it properly if it fires.
> + * Returns a pointer to the already allocated struct pending_irq that is
> + * meant to be used by that event.
> + */
> +struct pending_irq *gicv3_assign_guest_event(struct domain *d,
> +                                             paddr_t vdoorbell_address,
> +                                             uint32_t vdevid, uint32_t veventid,
> +                                             struct vcpu *v, uint32_t virt_lpi)
> +{
> +    struct pending_irq *pirq;
> +    uint32_t host_lpi = 0;
> +
> +    pirq = get_event_pending_irq(d, vdoorbell_address, vdevid, veventid,
> +                                 &host_lpi);
> +
> +    if ( !pirq || !host_lpi )

Again, if one of them is valid then the other is valid as well. If not, 
then you have a bigger problem.

Also, please test host_lpi against INVALID_LPI to avoid assuming it will 
always be 0.
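
Something along these lines (untested, just to illustrate both points):

    pirq = get_event_pending_irq(d, vdoorbell_address, vdevid, veventid,
                                 &host_lpi);
    if ( !pirq )
        return NULL;

    /* If pirq is valid, the host LPI that comes with it must be valid too. */
    ASSERT(host_lpi != INVALID_LPI);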

> +        return NULL;
> +
> +    gicv3_lpi_update_host_entry(host_lpi, d->domain_id,
> +                                v ? v->vcpu_id : INVALID_VCPU_ID, virt_lpi);
> +
> +    return pirq;
> +}
> +
>  /* Scan the DT for any ITS nodes and create a list of host ITSes out of it. */
>  void gicv3_its_dt_init(const struct dt_device_node *node)
>  {
> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
> index 731fe0c..c5c0e5e 100644
> --- a/xen/arch/arm/vgic-v3-its.c
> +++ b/xen/arch/arm/vgic-v3-its.c
> @@ -286,9 +286,9 @@ static bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
>   * If vcpu_ptr is provided, returns the VCPU belonging to that collection.
>   * Must be called with the ITS lock held.
>   */
> -bool write_itte_locked(struct virt_its *its, uint32_t devid,
> -                       uint32_t evid, uint32_t collid, uint32_t vlpi,
> -                       struct vcpu **vcpu_ptr)
> +static bool write_itte_locked(struct virt_its *its, uint32_t devid,
> +                              uint32_t evid, uint32_t collid, uint32_t vlpi,
> +                              struct vcpu **vcpu_ptr)
>  {
>      paddr_t addr;
>      struct vits_itte itte;
> @@ -429,6 +429,33 @@ out_unlock:
>      return ret;
>  }
>
> +/*
> + * For a given virtual LPI read the enabled bit and priority from the virtual
> + * property table and update the virtual IRQ's state in the given pending_irq.
> + * Must be called with the respective VGIC VCPU lock held.
> + */
> +static int update_lpi_property(struct domain *d, struct pending_irq *p)
> +{
> +    paddr_t addr;
> +    uint8_t property;
> +    int ret;
> +
> +    addr = d->arch.vgic.rdist_propbase & GENMASK(51, 12);
> +
> +    ret = vgic_access_guest_memory(d, addr + p->irq - LPI_OFFSET,
> +                                   &property, sizeof(property), false);
> +    if ( ret )
> +        return ret;
> +
> +    p->lpi_priority = property & LPI_PROP_PRIO_MASK;

Again, I don't think this will update lpi_priority atomically. So what is 
preventing a concurrent access while this happens?
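
For illustration only (assuming write_atomic() is fine to use on this
uint8_t field), something like:

    write_atomic(&p->lpi_priority, property & LPI_PROP_PRIO_MASK);

plus whatever locking convention the readers of lpi_priority rely on.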

> +    if ( property & LPI_PROP_ENABLED )
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
> +    else
> +        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
> +
> +    return 0;
> +}
> +
>  /* Must be called with the ITS lock held. */
>  static int its_discard_event(struct virt_its *its,
>                               uint32_t vdevid, uint32_t vevid)
> @@ -556,6 +583,93 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
>      return ret;
>  }
>
> +static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
> +{
> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
> +    uint32_t eventid = its_cmd_get_id(cmdptr);
> +    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
> +    uint16_t collid = its_cmd_get_collection(cmdptr);
> +    struct pending_irq *pirq;
> +    struct vcpu *vcpu = NULL;
> +    int ret = -1;
> +
> +    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
> +        intid = eventid;
> +
> +    spin_lock(&its->its_lock);
> +    /*
> +     * Check whether there is a valid existing mapping. If yes, behavior is
> +     * unpredictable, we choose to ignore this command here.
> +     * This makes sure we start with a pristine pending_irq below.
> +     */
> +    if ( read_itte_locked(its, devid, eventid, &vcpu, &_intid) &&
> +         _intid != INVALID_LPI )
> +    {
> +        spin_unlock(&its->its_lock);
> +        return -1;
> +    }
> +
> +    /* Enter the mapping in our virtual ITS tables. */
> +    if ( !write_itte_locked(its, devid, eventid, collid, intid, &vcpu) )
> +    {
> +        spin_unlock(&its->its_lock);
> +        return -1;
> +    }
> +
> +    spin_unlock(&its->its_lock);
> +
> +    /*
> +     * Connect this virtual LPI to the corresponding host LPI, which is
> +     * determined by the same device ID and event ID on the host side.
> +     * This returns us the corresponding, still unused pending_irq.
> +     */
> +    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
> +                                    devid, eventid, vcpu, intid);
> +    if ( !pirq )
> +        goto out_remove_mapping;
> +
> +    vgic_init_pending_irq(pirq, intid);
> +
> +    /*
> +     * Now read the guest's property table to initialize our cached state.
> +     * It can't fire at this time, because it is not known to the host yet.

gicv3_assign_guest_event will write the mapping between the host LPI and 
virtual LPI above. So could you detail what you mean by "it is not known 
to the host yet"?

> +     * We don't need the VGIC VCPU lock here, because the pending_irq isn't
> +     * in the radix tree yet.
> +     */
> +    ret = update_lpi_property(its->d, pirq);
> +    if ( ret )
> +        goto out_remove_host_entry;
> +
> +    pirq->lpi_vcpu_id = vcpu->vcpu_id;
> +    /*
> +     * Mark this LPI as new, so any older (now unmapped) LPI in any LR
> +     * can be easily recognised as such.
> +     */
> +    set_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &pirq->status);
> +
> +    /*
> +     * Now insert the pending_irq into the domain's LPI tree, so that
> +     * it becomes live.
> +     */
> +    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
> +    ret = radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
> +    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
> +
> +    if ( !ret )

So radix_tree_insert could return an error because of a memory allocation 
failure or because the vLPI was already in use.

Leaving aside the memory allocation failure, this is the only place in 
the code which checks whether a vLPI is already in use.

So I think there is a potential race condition between this code and 
do_LPI if the vLPI already had a mapping. In that case we may use the 
wrong vCPU lock for protection, as it is retrieved from the host LPI array.

I can see two solutions to this:
	1) Check that the vLPI does not yet have a mapping in the radix tree 
before calling gicv3_assign_guest_event. This would require taking the 
radix tree lock for a longer time. However, I think this would break your 
locking order (a rough sketch of the lookup follows below).
	2) Don't get the vCPU from the host LPI array but from pending_irq.
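
To illustrate 1), an untested sketch of the lookup itself (the caveat being
that, to be really race-free, the lock would have to be held until after the
radix_tree_insert further down, which is exactly the locking order problem):

    struct pending_irq *p;

    read_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
    p = radix_tree_lookup(&its->d->arch.vgic.pend_lpi_tree, intid);
    read_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);

    if ( p )
        /* The vLPI is already live, bail out before assigning the event. */
        return -1;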

> +        return 0;
> +
> +out_remove_host_entry:
> +    gicv3_remove_guest_event(its->d, its->doorbell_address, devid, eventid);
> +
> +out_remove_mapping:
> +    spin_lock(&its->its_lock);
> +    write_itte_locked(its, devid, eventid,
> +                      UNMAPPED_COLLECTION, INVALID_LPI, NULL);
> +    spin_unlock(&its->its_lock);
> +
> +    return ret;
> +}
> +
>  #define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
>  #define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
>
> @@ -597,6 +711,10 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
>          case GITS_CMD_MAPD:
>              ret = its_handle_mapd(its, command);
>              break;
> +        case GITS_CMD_MAPI:
> +        case GITS_CMD_MAPTI:
> +            ret = its_handle_mapti(its, command);
> +            break;
>          case GITS_CMD_SYNC:
>              /* We handle ITS commands synchronously, so we ignore SYNC. */
>              break;
> diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
> index 6f94e65..9c08cee 100644
> --- a/xen/include/asm-arm/gic_v3_its.h
> +++ b/xen/include/asm-arm/gic_v3_its.h
> @@ -175,6 +175,9 @@ struct pending_irq *gicv3_its_get_event_pending_irq(struct domain *d,
>                                                      uint32_t veventid);
>  int gicv3_remove_guest_event(struct domain *d, paddr_t vdoorbell_address,
>                                       uint32_t vdevid, uint32_t veventid);
> +struct pending_irq *gicv3_assign_guest_event(struct domain *d, paddr_t doorbell,
> +                                             uint32_t devid, uint32_t eventid,
> +                                             struct vcpu *v, uint32_t virt_lpi);
>  void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
>                                   unsigned int vcpu_id, uint32_t virt_lpi);
>
>

Cheers,
Stefano Stabellini May 22, 2017, 11:39 p.m. UTC | #2
On Thu, 11 May 2017, Andre Przywara wrote:
> @@ -556,6 +583,93 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
>      return ret;
>  }
>  
> +static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
> +{
> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
> +    uint32_t eventid = its_cmd_get_id(cmdptr);
> +    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
> +    uint16_t collid = its_cmd_get_collection(cmdptr);
> +    struct pending_irq *pirq;
> +    struct vcpu *vcpu = NULL;
> +    int ret = -1;

I think we need to check that the eventid is valid, don't we?


> +    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
> +        intid = eventid;
> +
> +    spin_lock(&its->its_lock);
> +    /*
> +     * Check whether there is a valid existing mapping. If yes, behavior is
> +     * unpredictable, we choose to ignore this command here.
> +     * This makes sure we start with a pristine pending_irq below.
> +     */
> +    if ( read_itte_locked(its, devid, eventid, &vcpu, &_intid) &&
> +         _intid != INVALID_LPI )
> +    {
> +        spin_unlock(&its->its_lock);
> +        return -1;
> +    }
> +
> +    /* Enter the mapping in our virtual ITS tables. */
> +    if ( !write_itte_locked(its, devid, eventid, collid, intid, &vcpu) )
> +    {
> +        spin_unlock(&its->its_lock);
> +        return -1;
> +    }
> +
> +    spin_unlock(&its->its_lock);
> +
> +    /*
> +     * Connect this virtual LPI to the corresponding host LPI, which is
> +     * determined by the same device ID and event ID on the host side.
> +     * This returns us the corresponding, still unused pending_irq.
> +     */
> +    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
> +                                    devid, eventid, vcpu, intid);
> +    if ( !pirq )
> +        goto out_remove_mapping;
> +
> +    vgic_init_pending_irq(pirq, intid);
> +
> +    /*
> +     * Now read the guest's property table to initialize our cached state.
> +     * It can't fire at this time, because it is not known to the host yet.
> +     * We don't need the VGIC VCPU lock here, because the pending_irq isn't
> +     * in the radix tree yet.
> +     */
> +    ret = update_lpi_property(its->d, pirq);
> +    if ( ret )
> +        goto out_remove_host_entry;
> +
> +    pirq->lpi_vcpu_id = vcpu->vcpu_id;
> +    /*
> +     * Mark this LPI as new, so any older (now unmapped) LPI in any LR
> +     * can be easily recognised as such.
> +     */
> +    set_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &pirq->status);
> +
> +    /*
> +     * Now insert the pending_irq into the domain's LPI tree, so that
> +     * it becomes live.
> +     */
> +    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
> +    ret = radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
> +    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
> +
> +    if ( !ret )
> +        return 0;
> +
> +out_remove_host_entry:
> +    gicv3_remove_guest_event(its->d, its->doorbell_address, devid, eventid);
> +
> +out_remove_mapping:
> +    spin_lock(&its->its_lock);
> +    write_itte_locked(its, devid, eventid,
> +                      UNMAPPED_COLLECTION, INVALID_LPI, NULL);
> +    spin_unlock(&its->its_lock);
> +
> +    return ret;
> +}
Andre Przywara May 23, 2017, 10:01 a.m. UTC | #3
Hi,

On 23/05/17 00:39, Stefano Stabellini wrote:
> On Thu, 11 May 2017, Andre Przywara wrote:
>> @@ -556,6 +583,93 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
>>      return ret;
>>  }
>>  
>> +static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
>> +{
>> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
>> +    uint32_t eventid = its_cmd_get_id(cmdptr);
>> +    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
>> +    uint16_t collid = its_cmd_get_collection(cmdptr);
>> +    struct pending_irq *pirq;
>> +    struct vcpu *vcpu = NULL;
>> +    int ret = -1;
> 
> I think we need to check that the eventid is valid, don't we?

Yes, but this will be done below as part of write_itte_locked(), and in fact
already by read_itte_locked().
Shall I add a comment about this?
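
The shape of that check is roughly the following (illustrative only,
number_of_events() here is a made-up stand-in for the ITT-size lookup that
the real helpers do):

    /* Illustrative only: reject event IDs beyond the ITT size of this device. */
    if ( evid >= number_of_events(its, devid) )     /* hypothetical helper */
        return false;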

Cheers,
Andre

>> +    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
>> +        intid = eventid;
>> +
>> +    spin_lock(&its->its_lock);
>> +    /*
>> +     * Check whether there is a valid existing mapping. If yes, behavior is
>> +     * unpredictable, we choose to ignore this command here.
>> +     * This makes sure we start with a pristine pending_irq below.
>> +     */
>> +    if ( read_itte_locked(its, devid, eventid, &vcpu, &_intid) &&
>> +         _intid != INVALID_LPI )
>> +    {
>> +        spin_unlock(&its->its_lock);
>> +        return -1;
>> +    }
>> +
>> +    /* Enter the mapping in our virtual ITS tables. */
>> +    if ( !write_itte_locked(its, devid, eventid, collid, intid, &vcpu) )
>> +    {
>> +        spin_unlock(&its->its_lock);
>> +        return -1;
>> +    }
>> +
>> +    spin_unlock(&its->its_lock);
>> +
>> +    /*
>> +     * Connect this virtual LPI to the corresponding host LPI, which is
>> +     * determined by the same device ID and event ID on the host side.
>> +     * This returns us the corresponding, still unused pending_irq.
>> +     */
>> +    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
>> +                                    devid, eventid, vcpu, intid);
>> +    if ( !pirq )
>> +        goto out_remove_mapping;
>> +
>> +    vgic_init_pending_irq(pirq, intid);
>> +
>> +    /*
>> +     * Now read the guest's property table to initialize our cached state.
>> +     * It can't fire at this time, because it is not known to the host yet.
>> +     * We don't need the VGIC VCPU lock here, because the pending_irq isn't
>> +     * in the radix tree yet.
>> +     */
>> +    ret = update_lpi_property(its->d, pirq);
>> +    if ( ret )
>> +        goto out_remove_host_entry;
>> +
>> +    pirq->lpi_vcpu_id = vcpu->vcpu_id;
>> +    /*
>> +     * Mark this LPI as new, so any older (now unmapped) LPI in any LR
>> +     * can be easily recognised as such.
>> +     */
>> +    set_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &pirq->status);
>> +
>> +    /*
>> +     * Now insert the pending_irq into the domain's LPI tree, so that
>> +     * it becomes live.
>> +     */
>> +    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
>> +    ret = radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
>> +    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
>> +
>> +    if ( !ret )
>> +        return 0;
>> +
>> +out_remove_host_entry:
>> +    gicv3_remove_guest_event(its->d, its->doorbell_address, devid, eventid);
>> +
>> +out_remove_mapping:
>> +    spin_lock(&its->its_lock);
>> +    write_itte_locked(its, devid, eventid,
>> +                      UNMAPPED_COLLECTION, INVALID_LPI, NULL);
>> +    spin_unlock(&its->its_lock);
>> +
>> +    return ret;
>> +}
Stefano Stabellini May 23, 2017, 5:44 p.m. UTC | #4
On Tue, 23 May 2017, Andre Przywara wrote:
> Hi,
> 
> On 23/05/17 00:39, Stefano Stabellini wrote:
> > On Thu, 11 May 2017, Andre Przywara wrote:
> >> @@ -556,6 +583,93 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
> >>      return ret;
> >>  }
> >>  
> >> +static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
> >> +{
> >> +    uint32_t devid = its_cmd_get_deviceid(cmdptr);
> >> +    uint32_t eventid = its_cmd_get_id(cmdptr);
> >> +    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
> >> +    uint16_t collid = its_cmd_get_collection(cmdptr);
> >> +    struct pending_irq *pirq;
> >> +    struct vcpu *vcpu = NULL;
> >> +    int ret = -1;
> > 
> > I think we need to check that the eventid is valid, don't we?
> 
> Yes, but this will be done below as part of write_itte_locked(), and in fact
> already by read_itte_locked().
> Shall I add a comment about this?

No need, thanks

> 
> >> +    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
> >> +        intid = eventid;
> >> +
> >> +    spin_lock(&its->its_lock);
> >> +    /*
> >> +     * Check whether there is a valid existing mapping. If yes, behavior is
> >> +     * unpredictable, we choose to ignore this command here.
> >> +     * This makes sure we start with a pristine pending_irq below.
> >> +     */
> >> +    if ( read_itte_locked(its, devid, eventid, &vcpu, &_intid) &&
> >> +         _intid != INVALID_LPI )
> >> +    {
> >> +        spin_unlock(&its->its_lock);
> >> +        return -1;
> >> +    }
> >> +
> >> +    /* Enter the mapping in our virtual ITS tables. */
> >> +    if ( !write_itte_locked(its, devid, eventid, collid, intid, &vcpu) )
> >> +    {
> >> +        spin_unlock(&its->its_lock);
> >> +        return -1;
> >> +    }
> >> +
> >> +    spin_unlock(&its->its_lock);
> >> +
> >> +    /*
> >> +     * Connect this virtual LPI to the corresponding host LPI, which is
> >> +     * determined by the same device ID and event ID on the host side.
> >> +     * This returns us the corresponding, still unused pending_irq.
> >> +     */
> >> +    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
> >> +                                    devid, eventid, vcpu, intid);
> >> +    if ( !pirq )
> >> +        goto out_remove_mapping;
> >> +
> >> +    vgic_init_pending_irq(pirq, intid);
> >> +
> >> +    /*
> >> +     * Now read the guest's property table to initialize our cached state.
> >> +     * It can't fire at this time, because it is not known to the host yet.
> >> +     * We don't need the VGIC VCPU lock here, because the pending_irq isn't
> >> +     * in the radix tree yet.
> >> +     */
> >> +    ret = update_lpi_property(its->d, pirq);
> >> +    if ( ret )
> >> +        goto out_remove_host_entry;
> >> +
> >> +    pirq->lpi_vcpu_id = vcpu->vcpu_id;
> >> +    /*
> >> +     * Mark this LPI as new, so any older (now unmapped) LPI in any LR
> >> +     * can be easily recognised as such.
> >> +     */
> >> +    set_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &pirq->status);
> >> +
> >> +    /*
> >> +     * Now insert the pending_irq into the domain's LPI tree, so that
> >> +     * it becomes live.
> >> +     */
> >> +    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
> >> +    ret = radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
> >> +    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
> >> +
> >> +    if ( !ret )
> >> +        return 0;
> >> +
> >> +out_remove_host_entry:
> >> +    gicv3_remove_guest_event(its->d, its->doorbell_address, devid, eventid);
> >> +
> >> +out_remove_mapping:
> >> +    spin_lock(&its->its_lock);
> >> +    write_itte_locked(its, devid, eventid,
> >> +                      UNMAPPED_COLLECTION, INVALID_LPI, NULL);
> >> +    spin_unlock(&its->its_lock);
> >> +
> >> +    return ret;
> >> +}
>

Patch

diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
index be4c3e0..8a50f7d 100644
--- a/xen/arch/arm/gic-v3-its.c
+++ b/xen/arch/arm/gic-v3-its.c
@@ -887,6 +887,34 @@  int gicv3_remove_guest_event(struct domain *d, paddr_t vdoorbell_address,
     return 0;
 }
 
+/*
+ * Connects the event ID for an already assigned device to the given VCPU/vLPI
+ * pair. The corresponding physical LPI is already mapped on the host side
+ * (when assigning the physical device to the guest), so we just connect the
+ * target VCPU/vLPI pair to that interrupt to inject it properly if it fires.
+ * Returns a pointer to the already allocated struct pending_irq that is
+ * meant to be used by that event.
+ */
+struct pending_irq *gicv3_assign_guest_event(struct domain *d,
+                                             paddr_t vdoorbell_address,
+                                             uint32_t vdevid, uint32_t veventid,
+                                             struct vcpu *v, uint32_t virt_lpi)
+{
+    struct pending_irq *pirq;
+    uint32_t host_lpi = 0;
+
+    pirq = get_event_pending_irq(d, vdoorbell_address, vdevid, veventid,
+                                 &host_lpi);
+
+    if ( !pirq || !host_lpi )
+        return NULL;
+
+    gicv3_lpi_update_host_entry(host_lpi, d->domain_id,
+                                v ? v->vcpu_id : INVALID_VCPU_ID, virt_lpi);
+
+    return pirq;
+}
+
 /* Scan the DT for any ITS nodes and create a list of host ITSes out of it. */
 void gicv3_its_dt_init(const struct dt_device_node *node)
 {
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 731fe0c..c5c0e5e 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -286,9 +286,9 @@  static bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
  * If vcpu_ptr is provided, returns the VCPU belonging to that collection.
  * Must be called with the ITS lock held.
  */
-bool write_itte_locked(struct virt_its *its, uint32_t devid,
-                       uint32_t evid, uint32_t collid, uint32_t vlpi,
-                       struct vcpu **vcpu_ptr)
+static bool write_itte_locked(struct virt_its *its, uint32_t devid,
+                              uint32_t evid, uint32_t collid, uint32_t vlpi,
+                              struct vcpu **vcpu_ptr)
 {
     paddr_t addr;
     struct vits_itte itte;
@@ -429,6 +429,33 @@  out_unlock:
     return ret;
 }
 
+/*
+ * For a given virtual LPI read the enabled bit and priority from the virtual
+ * property table and update the virtual IRQ's state in the given pending_irq.
+ * Must be called with the respective VGIC VCPU lock held.
+ */
+static int update_lpi_property(struct domain *d, struct pending_irq *p)
+{
+    paddr_t addr;
+    uint8_t property;
+    int ret;
+
+    addr = d->arch.vgic.rdist_propbase & GENMASK(51, 12);
+
+    ret = vgic_access_guest_memory(d, addr + p->irq - LPI_OFFSET,
+                                   &property, sizeof(property), false);
+    if ( ret )
+        return ret;
+
+    p->lpi_priority = property & LPI_PROP_PRIO_MASK;
+    if ( property & LPI_PROP_ENABLED )
+        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+    else
+        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+
+    return 0;
+}
+
 /* Must be called with the ITS lock held. */
 static int its_discard_event(struct virt_its *its,
                              uint32_t vdevid, uint32_t vevid)
@@ -556,6 +583,93 @@  static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
     return ret;
 }
 
+static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
+{
+    uint32_t devid = its_cmd_get_deviceid(cmdptr);
+    uint32_t eventid = its_cmd_get_id(cmdptr);
+    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
+    uint16_t collid = its_cmd_get_collection(cmdptr);
+    struct pending_irq *pirq;
+    struct vcpu *vcpu = NULL;
+    int ret = -1;
+
+    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
+        intid = eventid;
+
+    spin_lock(&its->its_lock);
+    /*
+     * Check whether there is a valid existing mapping. If yes, behavior is
+     * unpredictable, we choose to ignore this command here.
+     * This makes sure we start with a pristine pending_irq below.
+     */
+    if ( read_itte_locked(its, devid, eventid, &vcpu, &_intid) &&
+         _intid != INVALID_LPI )
+    {
+        spin_unlock(&its->its_lock);
+        return -1;
+    }
+
+    /* Enter the mapping in our virtual ITS tables. */
+    if ( !write_itte_locked(its, devid, eventid, collid, intid, &vcpu) )
+    {
+        spin_unlock(&its->its_lock);
+        return -1;
+    }
+
+    spin_unlock(&its->its_lock);
+
+    /*
+     * Connect this virtual LPI to the corresponding host LPI, which is
+     * determined by the same device ID and event ID on the host side.
+     * This returns us the corresponding, still unused pending_irq.
+     */
+    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
+                                    devid, eventid, vcpu, intid);
+    if ( !pirq )
+        goto out_remove_mapping;
+
+    vgic_init_pending_irq(pirq, intid);
+
+    /*
+     * Now read the guest's property table to initialize our cached state.
+     * It can't fire at this time, because it is not known to the host yet.
+     * We don't need the VGIC VCPU lock here, because the pending_irq isn't
+     * in the radix tree yet.
+     */
+    ret = update_lpi_property(its->d, pirq);
+    if ( ret )
+        goto out_remove_host_entry;
+
+    pirq->lpi_vcpu_id = vcpu->vcpu_id;
+    /*
+     * Mark this LPI as new, so any older (now unmapped) LPI in any LR
+     * can be easily recognised as such.
+     */
+    set_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &pirq->status);
+
+    /*
+     * Now insert the pending_irq into the domain's LPI tree, so that
+     * it becomes live.
+     */
+    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
+    ret = radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
+    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
+
+    if ( !ret )
+        return 0;
+
+out_remove_host_entry:
+    gicv3_remove_guest_event(its->d, its->doorbell_address, devid, eventid);
+
+out_remove_mapping:
+    spin_lock(&its->its_lock);
+    write_itte_locked(its, devid, eventid,
+                      UNMAPPED_COLLECTION, INVALID_LPI, NULL);
+    spin_unlock(&its->its_lock);
+
+    return ret;
+}
+
 #define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
 #define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
 
@@ -597,6 +711,10 @@  static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
         case GITS_CMD_MAPD:
             ret = its_handle_mapd(its, command);
             break;
+        case GITS_CMD_MAPI:
+        case GITS_CMD_MAPTI:
+            ret = its_handle_mapti(its, command);
+            break;
         case GITS_CMD_SYNC:
             /* We handle ITS commands synchronously, so we ignore SYNC. */
             break;
diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
index 6f94e65..9c08cee 100644
--- a/xen/include/asm-arm/gic_v3_its.h
+++ b/xen/include/asm-arm/gic_v3_its.h
@@ -175,6 +175,9 @@  struct pending_irq *gicv3_its_get_event_pending_irq(struct domain *d,
                                                     uint32_t veventid);
 int gicv3_remove_guest_event(struct domain *d, paddr_t vdoorbell_address,
                                      uint32_t vdevid, uint32_t veventid);
+struct pending_irq *gicv3_assign_guest_event(struct domain *d, paddr_t doorbell,
+                                             uint32_t devid, uint32_t eventid,
+                                             struct vcpu *v, uint32_t virt_lpi);
 void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
                                  unsigned int vcpu_id, uint32_t virt_lpi);