
[v8,14/27] ARM: vITS: introduce translation table walks

Message ID 1491957874-31600-15-git-send-email-andre.przywara@arm.com (mailing list archive)
State New, archived

Commit Message

Andre Przywara April 12, 2017, 12:44 a.m. UTC
The ITS stores the target (v)CPU and the (virtual) LPI number in tables.
Introduce functions to walk those tables and translate a device ID -
event ID pair into a pair of virtual LPI and vCPU.
We map those tables on demand - which is cheap on arm64 - and copy the
respective entries before using them, to avoid the guest tampering with
them in the meantime.

To allow compiling without warnings, we declare two functions as
non-static for the moment, which two later patches will fix.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 xen/arch/arm/vgic-v3-its.c | 183 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 183 insertions(+)
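
For orientation, here is a minimal sketch of how a command handler might
consume the walkers introduced below. The handler and its command-buffer
field layout are assumptions for illustration only; read_itte() is the
helper this patch adds.

    /*
     * Illustrative caller only: decode a (devid, eventid) pair from a
     * hypothetical command buffer and translate it to (vcpu, vlpi).
     */
    static int example_translate(struct virt_its *its, const uint64_t *cmd)
    {
        uint32_t devid = cmd[0] >> 32;              /* assumed field layout */
        uint32_t eventid = cmd[1] & GENMASK(31, 0); /* assumed field layout */
        struct vcpu *vcpu;
        uint32_t vlpi;

        if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
            return -1;

        /* vcpu/vlpi now identify the virtual interrupt for this event. */
        return 0;
    }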

Comments

Julien Grall April 12, 2017, 1:22 p.m. UTC | #1
On 12/04/17 01:44, Andre Przywara wrote:
> The ITS stores the target (v)CPU and the (virtual) LPI number in tables.
> Introduce functions to walk those tables and translate a device ID -
> event ID pair into a pair of virtual LPI and vCPU.
> We map those tables on demand - which is cheap on arm64 - and copy the
> respective entries before using them, to avoid the guest tampering with
> them in the meantime.
>
> To allow compiling without warnings, we declare two functions as
> non-static for the moment, which two later patches will fix.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  xen/arch/arm/vgic-v3-its.c | 183 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 183 insertions(+)
>
> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
> index a60f9b2..632ab84 100644
> --- a/xen/arch/arm/vgic-v3-its.c
> +++ b/xen/arch/arm/vgic-v3-its.c
> @@ -78,6 +78,7 @@ struct vits_itte
>      uint16_t pad;
>  };
>
> +#define UNMAPPED_COLLECTION      ((uint16_t)~0)

On v5 (patch #28), I asked to minimize the use of uint16_t (resp. 
uint64_t) when it refers to collection (resp. ITTE). But it sounds like 
you missed that point again.

>  #define GITS_BASER_RO_MASK       (GITS_BASER_TYPE_MASK | \
>                                    (31UL << GITS_BASER_ENTRY_SIZE_SHIFT))
>
> @@ -94,6 +95,188 @@ void vgic_v3_its_free_domain(struct domain *d)
>      ASSERT(RB_EMPTY_ROOT(&d->arch.vgic.its_devices));
>  }
>
> +/*
> + * The physical address is encoded slightly differently depending on
> + * the used page size: the highest four bits are stored in the lowest
> + * four bits of the field for 64K pages.
> + */
> +static paddr_t get_baser_phys_addr(uint64_t reg)
> +{
> +    if ( reg & BIT(9) )
> +        return (reg & GENMASK(47, 16)) |
> +                ((reg & GENMASK(15, 12)) << 36);
> +    else
> +        return reg & GENMASK(47, 12);
> +}
> +
> +/*
> + * Our collection table encoding:
> + * Just contains the 16-bit VCPU ID of the respective vCPU.
> + */
> +#define COLL_TABLE_ENTRY_SIZE 2UL
> +
> +/* Must be called with the ITS lock held. */
> +static struct vcpu *get_vcpu_from_collection(struct virt_its *its,
> +                                             uint16_t collid)
> +{
> +    paddr_t addr = get_baser_phys_addr(its->baser_coll);
> +    uint16_t vcpu_id;

I don't see any BUILD_BUG_ON making sure you can store a vCPU ID in 
uint16_t.
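
A sketch of what such a check could look like, assuming Xen's MAX_VIRT_CPUS
is the bound to enforce (the placement is illustrative; note that 0xFFFF is
reserved as UNMAPPED_COLLECTION, so valid vCPU IDs must stay below it):

    /*
     * Illustrative: vCPU IDs run 0 .. MAX_VIRT_CPUS - 1, so they all fit
     * below the UNMAPPED_COLLECTION sentinel iff MAX_VIRT_CPUS <= 0xffff.
     */
    BUILD_BUG_ON(MAX_VIRT_CPUS > UNMAPPED_COLLECTION);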

> +    int ret;
> +
> +    ASSERT(spin_is_locked(&its->its_lock));
> +
> +    if ( collid >= its->max_collections )
> +        return NULL;
> +
> +    ret = vgic_access_guest_memory(its->d, addr + collid * sizeof(uint16_t),
> +                                   &vcpu_id, sizeof(vcpu_id), false);

Minimizing the use of uint16_t will likely make this code less confusing 
to read as it mixes sizeof(vcpu_id) and sizeof(uint16_t).

> +    if ( ret )
> +        return NULL;
> +
> +    if ( vcpu_id == UNMAPPED_COLLECTION || vcpu_id >= its->d->max_vcpus )
> +        return NULL;
> +
> +    return its->d->vcpu[vcpu_id];
> +}
> +
> +/*
> + * Our device table encodings:
> + * Contains the guest physical address of the Interrupt Translation Table in
> + * bits [51:8], and the size of it is encoded as the number of bits minus one
> + * in the lowest 5 bits of the word.
> + */
> +#define DEV_TABLE_ENTRY_SIZE  8UL
> +#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8))
> +#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1))
> +#define DEV_TABLE_ENTRY(addr, bits)                     \
> +        (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0)))
> +
> +/*
> + * Lookup the address of the Interrupt Translation Table associated with
> + * that device ID.
> + * TODO: add support for walking indirect tables.
> + */
> +static int its_get_itt(struct virt_its *its, uint32_t devid,
> +                       uint64_t *itt)
> +{
> +    paddr_t addr = get_baser_phys_addr(its->baser_dev);
> +
> +    if ( devid >= its->max_devices )
> +        return -EINVAL;
> +
> +    return vgic_access_guest_memory(its->d, addr + devid * sizeof(uint64_t),
> +                                    itt, sizeof(*itt), false);

See my remark above for uint64_t.

> +}
> +
> +/*
> + * Lookup the address of the Interrupt Translation Table associated with
> + * a device ID and return the address of the ITTE belonging to the event ID
> + * (which is an index into that table).
> + */
> +static paddr_t its_get_itte_address(struct virt_its *its,
> +                                    uint32_t devid, uint32_t evid)
> +{
> +    uint64_t itt;
> +    int ret;
> +
> +    ret = its_get_itt(its, devid, &itt);
> +    if ( ret )
> +        return INVALID_PADDR;
> +
> +    if ( evid >= DEV_TABLE_ITT_SIZE(itt) ||
> +         DEV_TABLE_ITT_ADDR(itt) == INVALID_PADDR )
> +        return INVALID_PADDR;
> +
> +    return DEV_TABLE_ITT_ADDR(itt) + evid * sizeof(struct vits_itte);
> +}
> +
> +/*
> + * Queries the collection and device tables to get the vCPU and virtual
> + * LPI number for a given guest event. This first accesses the guest memory
> + * to resolve the address of the ITTE, then reads the ITTE entry at this
> + * address and puts the result in vcpu_ptr and vlpi_ptr.
> + * Must be called with the ITS lock held.
> + */
> +static bool read_itte_locked(struct virt_its *its, uint32_t devid,
> +                             uint32_t evid, struct vcpu **vcpu_ptr,
> +                             uint32_t *vlpi_ptr)
> +{
> +    paddr_t addr;
> +    struct vits_itte itte;
> +    struct vcpu *vcpu;
> +
> +    ASSERT(spin_is_locked(&its->its_lock));
> +
> +    addr = its_get_itte_address(its, devid, evid);
> +    if ( addr == INVALID_PADDR )
> +        return false;
> +
> +    if ( vgic_access_guest_memory(its->d, addr, &itte, sizeof(itte), false) )
> +        return false;
> +
> +    vcpu = get_vcpu_from_collection(its, itte.collection);
> +    if ( !vcpu )
> +        return false;
> +
> +    *vcpu_ptr = vcpu;
> +    *vlpi_ptr = itte.vlpi;
> +    return true;
> +}
> +
> +/*
> + * This function takes care of the locking by taking the its_lock itself, so
> + * a caller shall not hold this. Before returning, the lock is dropped again.
> + */
> +bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
> +               struct vcpu **vcpu_ptr, uint32_t *vlpi_ptr)
> +{
> +    bool ret;
> +
> +    spin_lock(&its->its_lock);
> +    ret = read_itte_locked(its, devid, evid, vcpu_ptr, vlpi_ptr);
> +    spin_unlock(&its->its_lock);
> +
> +    return ret;
> +}
> +
> +/*
> + * Queries the collection and device tables to translate the device ID and
> + * event ID and find the appropriate ITTE. The given collection ID and the
> + * virtual LPI number are then stored into that entry.
> + * If vcpu_ptr is provided, returns the VCPU belonging to that collection.
> + * Must be called with the ITS lock held.
> + */
> +bool write_itte_locked(struct virt_its *its, uint32_t devid,
> +                       uint32_t evid, uint32_t collid, uint32_t vlpi,
> +                       struct vcpu **vcpu_ptr)
> +{
> +    paddr_t addr;
> +    struct vits_itte itte;
> +
> +    ASSERT(spin_is_locked(&its->its_lock));
> +
> +    if ( collid >= its->max_collections )
> +        return false;
> +
> +    if ( vlpi >= its->d->arch.vgic.nr_lpis )
> +        return false;
> +
> +    addr = its_get_itte_address(its, devid, evid);
> +    if ( addr == INVALID_PADDR )
> +        return false;
> +
> +    itte.collection = collid;
> +    itte.vlpi = vlpi;
> +
> +    if ( vgic_access_guest_memory(its->d, addr, &itte, sizeof(itte), true) )
> +        return false;
> +
> +    if ( vcpu_ptr )
> +        *vcpu_ptr = get_vcpu_from_collection(its, collid);
> +
> +    return true;
> +}
> +
>  /**************************************
>   * Functions that handle ITS commands *
>   **************************************/
>
Andre Przywara April 12, 2017, 1:36 p.m. UTC | #2
Hi,

On 12/04/17 14:22, Julien Grall wrote:
> 
> 
> On 12/04/17 01:44, Andre Przywara wrote:
>> The ITS stores the target (v)CPU and the (virtual) LPI number in tables.
>> Introduce functions to walk those tables and translate a device ID -
>> event ID pair into a pair of virtual LPI and vCPU.
>> We map those tables on demand - which is cheap on arm64 - and copy the
>> respective entries before using them, to avoid the guest tampering with
>> them in the meantime.
>>
>> To allow compiling without warnings, we declare two functions as
>> non-static for the moment, which two later patches will fix.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>> ---
>>  xen/arch/arm/vgic-v3-its.c | 183
>> +++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 183 insertions(+)
>>
>> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
>> index a60f9b2..632ab84 100644
>> --- a/xen/arch/arm/vgic-v3-its.c
>> +++ b/xen/arch/arm/vgic-v3-its.c
>> @@ -78,6 +78,7 @@ struct vits_itte
>>      uint16_t pad;
>>  };
>>
>> +#define UNMAPPED_COLLECTION      ((uint16_t)~0)
> 
> On v5 (patch #28), I asked to minimize the use of uint16_t (resp.
> uint64_t) when it refers to collection (resp. ITTE). But it sounds like
> you missed that point again.

Well, I introduced that below, just failed to use it *everywhere*.

...

> 
>>  #define GITS_BASER_RO_MASK       (GITS_BASER_TYPE_MASK | \
>>                                    (31UL << GITS_BASER_ENTRY_SIZE_SHIFT))
>>
>> @@ -94,6 +95,188 @@ void vgic_v3_its_free_domain(struct domain *d)
>>      ASSERT(RB_EMPTY_ROOT(&d->arch.vgic.its_devices));
>>  }
>>
>> +/*
>> + * The physical address is encoded slightly differently depending on
>> + * the used page size: the highest four bits are stored in the lowest
>> + * four bits of the field for 64K pages.
>> + */
>> +static paddr_t get_baser_phys_addr(uint64_t reg)
>> +{
>> +    if ( reg & BIT(9) )
>> +        return (reg & GENMASK(47, 16)) |
>> +                ((reg & GENMASK(15, 12)) << 36);
>> +    else
>> +        return reg & GENMASK(47, 12);
>> +}
>> +
>> +/*
>> + * Our collection table encoding:
>> + * Just contains the 16-bit VCPU ID of the respective vCPU.
>> + */
>> +#define COLL_TABLE_ENTRY_SIZE 2UL

...		(here)

>> +
>> +/* Must be called with the ITS lock held. */
>> +static struct vcpu *get_vcpu_from_collection(struct virt_its *its,
>> +                                             uint16_t collid)
>> +{
>> +    paddr_t addr = get_baser_phys_addr(its->baser_coll);
>> +    uint16_t vcpu_id;
> 
> I don't see any BUILD_BUG_ON making sure you can store a vCPU ID in
> uint16_t.
> 
>> +    int ret;
>> +
>> +    ASSERT(spin_is_locked(&its->its_lock));
>> +
>> +    if ( collid >= its->max_collections )
>> +        return NULL;
>> +
>> +    ret = vgic_access_guest_memory(its->d, addr + collid *
>> sizeof(uint16_t),
>> +                                   &vcpu_id, sizeof(vcpu_id), false);
> 
> Minimizing the use of uint16_t will likely make this code less confusing
> to read as it mixes sizeof(vcpu_id) and sizeof(uint16_t).

The idea here is to use the actual type of the variable instead of
hard-coding an explicit type, since the two can get out of sync.

So would a typedef meet your expectations?

	typedef uint16_t coll_table_entry_t;
	typedef uint64_t dev_table_entry_t;

And then use that throughout the code?

Cheers,
Andre.

> 
>> +    if ( ret )
>> +        return NULL;
>> +
>> +    if ( vcpu_id == UNMAPPED_COLLECTION || vcpu_id >=
>> its->d->max_vcpus )
>> +        return NULL;
>> +
>> +    return its->d->vcpu[vcpu_id];
>> +}
>> +
>> +/*
>> + * Our device table encodings:
>> + * Contains the guest physical address of the Interrupt Translation
>> Table in
>> + * bits [51:8], and the size of it is encoded as the number of bits
>> minus one
>> + * in the lowest 5 bits of the word.
>> + */
>> +#define DEV_TABLE_ENTRY_SIZE  8UL
>> +#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8))
>> +#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1))
>> +#define DEV_TABLE_ENTRY(addr, bits)                     \
>> +        (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0)))
>> +
>> +/*
>> + * Lookup the address of the Interrupt Translation Table associated with
>> + * that device ID.
>> + * TODO: add support for walking indirect tables.
>> + */
>> +static int its_get_itt(struct virt_its *its, uint32_t devid,
>> +                       uint64_t *itt)
>> +{
>> +    paddr_t addr = get_baser_phys_addr(its->baser_dev);
>> +
>> +    if ( devid >= its->max_devices )
>> +        return -EINVAL;
>> +
>> +    return vgic_access_guest_memory(its->d, addr + devid *
>> sizeof(uint64_t),
>> +                                    itt, sizeof(*itt), false);
> 
> See my remark above for uint64_t.
> 
>> +}
>> +
>> +/*
>> + * Lookup the address of the Interrupt Translation Table associated with
>> + * a device ID and return the address of the ITTE belonging to the
>> event ID
>> + * (which is an index into that table).
>> + */
>> +static paddr_t its_get_itte_address(struct virt_its *its,
>> +                                    uint32_t devid, uint32_t evid)
>> +{
>> +    uint64_t itt;
>> +    int ret;
>> +
>> +    ret = its_get_itt(its, devid, &itt);
>> +    if ( ret )
>> +        return INVALID_PADDR;
>> +
>> +    if ( evid >= DEV_TABLE_ITT_SIZE(itt) ||
>> +         DEV_TABLE_ITT_ADDR(itt) == INVALID_PADDR )
>> +        return INVALID_PADDR;
>> +
>> +    return DEV_TABLE_ITT_ADDR(itt) + evid * sizeof(struct vits_itte);
>> +}
>> +
>> +/*
>> + * Queries the collection and device tables to get the vCPU and virtual
>> + * LPI number for a given guest event. This first accesses the guest
>> memory
>> + * to resolve the address of the ITTE, then reads the ITTE entry at this
>> + * address and puts the result in vcpu_ptr and vlpi_ptr.
>> + * Must be called with the ITS lock held.
>> + */
>> +static bool read_itte_locked(struct virt_its *its, uint32_t devid,
>> +                             uint32_t evid, struct vcpu **vcpu_ptr,
>> +                             uint32_t *vlpi_ptr)
>> +{
>> +    paddr_t addr;
>> +    struct vits_itte itte;
>> +    struct vcpu *vcpu;
>> +
>> +    ASSERT(spin_is_locked(&its->its_lock));
>> +
>> +    addr = its_get_itte_address(its, devid, evid);
>> +    if ( addr == INVALID_PADDR )
>> +        return false;
>> +
>> +    if ( vgic_access_guest_memory(its->d, addr, &itte, sizeof(itte),
>> false) )
>> +        return false;
>> +
>> +    vcpu = get_vcpu_from_collection(its, itte.collection);
>> +    if ( !vcpu )
>> +        return false;
>> +
>> +    *vcpu_ptr = vcpu;
>> +    *vlpi_ptr = itte.vlpi;
>> +    return true;
>> +}
>> +
>> +/*
>> + * This function takes care of the locking by taking the its_lock
>> itself, so
>> + * a caller shall not hold this. Before returning, the lock is
>> dropped again.
>> + */
>> +bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
>> +               struct vcpu **vcpu_ptr, uint32_t *vlpi_ptr)
>> +{
>> +    bool ret;
>> +
>> +    spin_lock(&its->its_lock);
>> +    ret = read_itte_locked(its, devid, evid, vcpu_ptr, vlpi_ptr);
>> +    spin_unlock(&its->its_lock);
>> +
>> +    return ret;
>> +}
>> +
>> +/*
>> + * Queries the collection and device tables to translate the device
>> ID and
>> + * event ID and find the appropriate ITTE. The given collection ID
>> and the
>> + * virtual LPI number are then stored into that entry.
>> + * If vcpu_ptr is provided, returns the VCPU belonging to that
>> collection.
>> + * Must be called with the ITS lock held.
>> + */
>> +bool write_itte_locked(struct virt_its *its, uint32_t devid,
>> +                       uint32_t evid, uint32_t collid, uint32_t vlpi,
>> +                       struct vcpu **vcpu_ptr)
>> +{
>> +    paddr_t addr;
>> +    struct vits_itte itte;
>> +
>> +    ASSERT(spin_is_locked(&its->its_lock));
>> +
>> +    if ( collid >= its->max_collections )
>> +        return false;
>> +
>> +    if ( vlpi >= its->d->arch.vgic.nr_lpis )
>> +        return false;
>> +
>> +    addr = its_get_itte_address(its, devid, evid);
>> +    if ( addr == INVALID_PADDR )
>> +        return false;
>> +
>> +    itte.collection = collid;
>> +    itte.vlpi = vlpi;
>> +
>> +    if ( vgic_access_guest_memory(its->d, addr, &itte, sizeof(itte),
>> true) )
>> +        return false;
>> +
>> +    if ( vcpu_ptr )
>> +        *vcpu_ptr = get_vcpu_from_collection(its, collid);
>> +
>> +    return true;
>> +}
>> +
>>  /**************************************
>>   * Functions that handle ITS commands *
>>   **************************************/
>>
>
Julien Grall April 12, 2017, 1:37 p.m. UTC | #3
On 12/04/17 14:36, Andre Przywara wrote:
> Hi,
>
> On 12/04/17 14:22, Julien Grall wrote:
>>
>>
>> On 12/04/17 01:44, Andre Przywara wrote:
>>> The ITS stores the target (v)CPU and the (virtual) LPI number in tables.
>>> Introduce functions to walk those tables and translate a device ID -
>>> event ID pair into a pair of virtual LPI and vCPU.
>>> We map those tables on demand - which is cheap on arm64 - and copy the
>>> respective entries before using them, to avoid the guest tampering with
>>> them in the meantime.
>>>
>>> To allow compiling without warnings, we declare two functions as
>>> non-static for the moment, which two later patches will fix.
>>>
>>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>>> ---
>>>  xen/arch/arm/vgic-v3-its.c | 183
>>> +++++++++++++++++++++++++++++++++++++++++++++
>>>  1 file changed, 183 insertions(+)
>>>
>>> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
>>> index a60f9b2..632ab84 100644
>>> --- a/xen/arch/arm/vgic-v3-its.c
>>> +++ b/xen/arch/arm/vgic-v3-its.c
>>> @@ -78,6 +78,7 @@ struct vits_itte
>>>      uint16_t pad;
>>>  };
>>>
>>> +#define UNMAPPED_COLLECTION      ((uint16_t)~0)
>>
>> On v5 (patch #28), I asked to minimize the use of uint16_t (resp.
>> uint64_t) when it refers to collection (resp. ITTE). But it sounds like
>> you missed that point again.
>
> Well, I introduced that below, just failed to use it *everywhere*.
>
> ...
>
>>
>>>  #define GITS_BASER_RO_MASK       (GITS_BASER_TYPE_MASK | \
>>>                                    (31UL << GITS_BASER_ENTRY_SIZE_SHIFT))
>>>
>>> @@ -94,6 +95,188 @@ void vgic_v3_its_free_domain(struct domain *d)
>>>      ASSERT(RB_EMPTY_ROOT(&d->arch.vgic.its_devices));
>>>  }
>>>
>>> +/*
>>> + * The physical address is encoded slightly differently depending on
>>> + * the used page size: the highest four bits are stored in the lowest
>>> + * four bits of the field for 64K pages.
>>> + */
>>> +static paddr_t get_baser_phys_addr(uint64_t reg)
>>> +{
>>> +    if ( reg & BIT(9) )
>>> +        return (reg & GENMASK(47, 16)) |
>>> +                ((reg & GENMASK(15, 12)) << 36);
>>> +    else
>>> +        return reg & GENMASK(47, 12);
>>> +}
>>> +
>>> +/*
>>> + * Our collection table encoding:
>>> + * Just contains the 16-bit VCPU ID of the respective vCPU.
>>> + */
>>> +#define COLL_TABLE_ENTRY_SIZE 2UL
>
> ...		(here)
>
>>> +
>>> +/* Must be called with the ITS lock held. */
>>> +static struct vcpu *get_vcpu_from_collection(struct virt_its *its,
>>> +                                             uint16_t collid)
>>> +{
>>> +    paddr_t addr = get_baser_phys_addr(its->baser_coll);
>>> +    uint16_t vcpu_id;
>>
>> I don't see any BUILD_BUG_ON making sure you can store a vCPU ID in
>> uint16_t.
>>
>>> +    int ret;
>>> +
>>> +    ASSERT(spin_is_locked(&its->its_lock));
>>> +
>>> +    if ( collid >= its->max_collections )
>>> +        return NULL;
>>> +
>>> +    ret = vgic_access_guest_memory(its->d, addr + collid *
>>> sizeof(uint16_t),
>>> +                                   &vcpu_id, sizeof(vcpu_id), false);
>>
>> Minimizing the use of uint16_t will likely make this code less confusing
>> to read as it mixes sizeof(vcpu_id) and sizeof(uint16_t).
>
> The idea here is to use the actual type of the variable instead of
> hard-coding an explicit type, since the two can get out of sync.
>
> So would a typedef meet your expectations?
>
> 	typedef uint16_t coll_table_entry_t;
> 	typedef uint64_t dev_table_entry_t;
>
> And then use that throughout the code?

Yes please.

Cheers,
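
For illustration, a minimal sketch of the direction agreed on above, using
the typedef names from Andre's proposal (this is a sketch of the plan, not
the actual follow-up revision):

    /* Proposed entry types, so sizeof() always matches the table layout. */
    typedef uint16_t coll_table_entry_t;
    typedef uint64_t dev_table_entry_t;

    #define UNMAPPED_COLLECTION   ((coll_table_entry_t)~0)
    #define COLL_TABLE_ENTRY_SIZE sizeof(coll_table_entry_t)

    /* get_vcpu_from_collection(), reworked to use the typedef throughout. */
    static struct vcpu *get_vcpu_from_collection(struct virt_its *its,
                                                 uint16_t collid)
    {
        paddr_t addr = get_baser_phys_addr(its->baser_coll);
        coll_table_entry_t vcpu_id;
        int ret;

        ASSERT(spin_is_locked(&its->its_lock));

        if ( collid >= its->max_collections )
            return NULL;

        ret = vgic_access_guest_memory(its->d,
                                       addr + collid * sizeof(coll_table_entry_t),
                                       &vcpu_id, sizeof(vcpu_id), false);
        if ( ret )
            return NULL;

        if ( vcpu_id == UNMAPPED_COLLECTION || vcpu_id >= its->d->max_vcpus )
            return NULL;

        return its->d->vcpu[vcpu_id];
    }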

Patch

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index a60f9b2..632ab84 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -78,6 +78,7 @@  struct vits_itte
     uint16_t pad;
 };
 
+#define UNMAPPED_COLLECTION      ((uint16_t)~0)
 #define GITS_BASER_RO_MASK       (GITS_BASER_TYPE_MASK | \
                                   (31UL << GITS_BASER_ENTRY_SIZE_SHIFT))
 
@@ -94,6 +95,188 @@  void vgic_v3_its_free_domain(struct domain *d)
     ASSERT(RB_EMPTY_ROOT(&d->arch.vgic.its_devices));
 }
 
+/*
+ * The physical address is encoded slightly differently depending on
+ * the used page size: the highest four bits are stored in the lowest
+ * four bits of the field for 64K pages.
+ */
+static paddr_t get_baser_phys_addr(uint64_t reg)
+{
+    if ( reg & BIT(9) )
+        return (reg & GENMASK(47, 16)) |
+                ((reg & GENMASK(15, 12)) << 36);
+    else
+        return reg & GENMASK(47, 12);
+}
+
+/*
+ * Our collection table encoding:
+ * Just contains the 16-bit VCPU ID of the respective vCPU.
+ */
+#define COLL_TABLE_ENTRY_SIZE 2UL
+
+/* Must be called with the ITS lock held. */
+static struct vcpu *get_vcpu_from_collection(struct virt_its *its,
+                                             uint16_t collid)
+{
+    paddr_t addr = get_baser_phys_addr(its->baser_coll);
+    uint16_t vcpu_id;
+    int ret;
+
+    ASSERT(spin_is_locked(&its->its_lock));
+
+    if ( collid >= its->max_collections )
+        return NULL;
+
+    ret = vgic_access_guest_memory(its->d, addr + collid * sizeof(uint16_t),
+                                   &vcpu_id, sizeof(vcpu_id), false);
+    if ( ret )
+        return NULL;
+
+    if ( vcpu_id == UNMAPPED_COLLECTION || vcpu_id >= its->d->max_vcpus )
+        return NULL;
+
+    return its->d->vcpu[vcpu_id];
+}
+
+/*
+ * Our device table encodings:
+ * Contains the guest physical address of the Interrupt Translation Table in
+ * bits [51:8], and the size of it is encoded as the number of bits minus one
+ * in the lowest 5 bits of the word.
+ */
+#define DEV_TABLE_ENTRY_SIZE  8UL
+#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8))
+#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1))
+#define DEV_TABLE_ENTRY(addr, bits)                     \
+        (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0)))
+
+/*
+ * Lookup the address of the Interrupt Translation Table associated with
+ * that device ID.
+ * TODO: add support for walking indirect tables.
+ */
+static int its_get_itt(struct virt_its *its, uint32_t devid,
+                       uint64_t *itt)
+{
+    paddr_t addr = get_baser_phys_addr(its->baser_dev);
+
+    if ( devid >= its->max_devices )
+        return -EINVAL;
+
+    return vgic_access_guest_memory(its->d, addr + devid * sizeof(uint64_t),
+                                    itt, sizeof(*itt), false);
+}
+
+/*
+ * Lookup the address of the Interrupt Translation Table associated with
+ * a device ID and return the address of the ITTE belonging to the event ID
+ * (which is an index into that table).
+ */
+static paddr_t its_get_itte_address(struct virt_its *its,
+                                    uint32_t devid, uint32_t evid)
+{
+    uint64_t itt;
+    int ret;
+
+    ret = its_get_itt(its, devid, &itt);
+    if ( ret )
+        return INVALID_PADDR;
+
+    if ( evid >= DEV_TABLE_ITT_SIZE(itt) ||
+         DEV_TABLE_ITT_ADDR(itt) == INVALID_PADDR )
+        return INVALID_PADDR;
+
+    return DEV_TABLE_ITT_ADDR(itt) + evid * sizeof(struct vits_itte);
+}
+
+/*
+ * Queries the collection and device tables to get the vCPU and virtual
+ * LPI number for a given guest event. This first accesses the guest memory
+ * to resolve the address of the ITTE, then reads the ITTE entry at this
+ * address and puts the result in vcpu_ptr and vlpi_ptr.
+ * Must be called with the ITS lock held.
+ */
+static bool read_itte_locked(struct virt_its *its, uint32_t devid,
+                             uint32_t evid, struct vcpu **vcpu_ptr,
+                             uint32_t *vlpi_ptr)
+{
+    paddr_t addr;
+    struct vits_itte itte;
+    struct vcpu *vcpu;
+
+    ASSERT(spin_is_locked(&its->its_lock));
+
+    addr = its_get_itte_address(its, devid, evid);
+    if ( addr == INVALID_PADDR )
+        return false;
+
+    if ( vgic_access_guest_memory(its->d, addr, &itte, sizeof(itte), false) )
+        return false;
+
+    vcpu = get_vcpu_from_collection(its, itte.collection);
+    if ( !vcpu )
+        return false;
+
+    *vcpu_ptr = vcpu;
+    *vlpi_ptr = itte.vlpi;
+    return true;
+}
+
+/*
+ * This function takes care of the locking by taking the its_lock itself, so
+ * a caller shall not hold this. Before returning, the lock is dropped again.
+ */
+bool read_itte(struct virt_its *its, uint32_t devid, uint32_t evid,
+               struct vcpu **vcpu_ptr, uint32_t *vlpi_ptr)
+{
+    bool ret;
+
+    spin_lock(&its->its_lock);
+    ret = read_itte_locked(its, devid, evid, vcpu_ptr, vlpi_ptr);
+    spin_unlock(&its->its_lock);
+
+    return ret;
+}
+
+/*
+ * Queries the collection and device tables to translate the device ID and
+ * event ID and find the appropriate ITTE. The given collection ID and the
+ * virtual LPI number are then stored into that entry.
+ * If vcpu_ptr is provided, returns the VCPU belonging to that collection.
+ * Must be called with the ITS lock held.
+ */
+bool write_itte_locked(struct virt_its *its, uint32_t devid,
+                       uint32_t evid, uint32_t collid, uint32_t vlpi,
+                       struct vcpu **vcpu_ptr)
+{
+    paddr_t addr;
+    struct vits_itte itte;
+
+    ASSERT(spin_is_locked(&its->its_lock));
+
+    if ( collid >= its->max_collections )
+        return false;
+
+    if ( vlpi >= its->d->arch.vgic.nr_lpis )
+        return false;
+
+    addr = its_get_itte_address(its, devid, evid);
+    if ( addr == INVALID_PADDR )
+        return false;
+
+    itte.collection = collid;
+    itte.vlpi = vlpi;
+
+    if ( vgic_access_guest_memory(its->d, addr, &itte, sizeof(itte), true) )
+        return false;
+
+    if ( vcpu_ptr )
+        *vcpu_ptr = get_vcpu_from_collection(its, collid);
+
+    return true;
+}
+
 /**************************************
  * Functions that handle ITS commands *
  **************************************/
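
The two bit-level encodings in this patch are easy to misread, so here is a
standalone sketch that exercises them with made-up values, assuming a 64-bit
build (GENMASK()/BIT() are redefined locally with Xen-like semantics; the
decode logic mirrors get_baser_phys_addr() and the DEV_TABLE_* macros above):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)        (1UL << (n))
    #define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

    /*
     * Same logic as get_baser_phys_addr(): with 64K pages (bit 9 of the
     * page-size field set), address bits [51:48] live in field bits [15:12].
     */
    static uint64_t baser_phys_addr(uint64_t reg)
    {
        if ( reg & BIT(9) )
            return (reg & GENMASK(47, 16)) |
                   ((reg & GENMASK(15, 12)) << 36);  /* [15:12] -> [51:48] */
        else
            return reg & GENMASK(47, 12);
    }

    /* Device table entry encoding, as in the patch. */
    #define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8))
    #define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1))
    #define DEV_TABLE_ENTRY(addr, bits) \
            (((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0)))

    int main(void)
    {
        /* 64K pages, address bits [47:16] = 0x12345, bits [51:48] = 0xa. */
        uint64_t baser = BIT(9) | (0x12345UL << 16) | (0xaUL << 12);

        /* An ITT at 0x80000000 covering 16 event-ID bits (65536 events). */
        uint64_t dte = DEV_TABLE_ENTRY(0x80000000UL, 16);

        printf("BASER decodes to 0x%lx\n",
               (unsigned long)baser_phys_addr(baser)); /* 0xa000123450000 */
        printf("ITT at 0x%lx, %lu events\n",           /* 0x80000000, 65536 */
               (unsigned long)DEV_TABLE_ITT_ADDR(dte),
               (unsigned long)DEV_TABLE_ITT_SIZE(dte));
        return 0;
    }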