
[RFC,11/24] ARM: vGICv3: handle virtual LPI pending and property tables

Message ID: 20160928182457.12433-12-andre.przywara@arm.com
State New, archived

Commit Message

Andre Przywara Sept. 28, 2016, 6:24 p.m. UTC
Allow a guest to provide the address and size for the memory regions
it has reserved for the GICv3 pending and property tables.
We sanitise the various fields of the respective redistributor
registers and map those pages into Xen's address space to have easy
access.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 xen/arch/arm/vgic-v3.c        | 189 ++++++++++++++++++++++++++++++++++++++----
 xen/arch/arm/vgic.c           |   4 +
 xen/include/asm-arm/domain.h  |   7 +-
 xen/include/asm-arm/gic-its.h |  10 ++-
 xen/include/asm-arm/vgic.h    |   3 +
 5 files changed, 197 insertions(+), 16 deletions(-)

Comments

Vijay Kilari Oct. 24, 2016, 3:32 p.m. UTC | #1
On Wed, Sep 28, 2016 at 11:54 PM, Andre Przywara <andre.przywara@arm.com> wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  xen/arch/arm/vgic-v3.c        | 189 ++++++++++++++++++++++++++++++++++++++----
>  xen/arch/arm/vgic.c           |   4 +
>  xen/include/asm-arm/domain.h  |   7 +-
>  xen/include/asm-arm/gic-its.h |  10 ++-
>  xen/include/asm-arm/vgic.h    |   3 +
>  5 files changed, 197 insertions(+), 16 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index e9b6490..8fe8386 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -20,12 +20,14 @@
>
>  #include <xen/bitops.h>
>  #include <xen/config.h>
> +#include <xen/domain_page.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
>  #include <xen/sizes.h>
> +#include <xen/vmap.h>
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> @@ -228,12 +230,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>          goto read_reserved;
>
>      case VREG64(GICR_PROPBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +        return 1;
>
>      case VREG64(GICR_PENDBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
> +        return 1;
>
>      case 0x0080:
>          goto read_reserved;
> @@ -301,11 +305,6 @@ bad_width:
>      domain_crash_synchronous();
>      return 0;
>
> -read_as_zero_64:
> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -    *r = 0;
> -    return 1;
> -
>  read_as_zero_32:
>      if ( dabt.size != DABT_WORD ) goto bad_width;
>      *r = 0;
> @@ -330,11 +329,149 @@ read_unknown:
>      return 1;
>  }
>
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +                                    int field_shift,
> +                                    uint64_t (*sanitise_fn)(uint64_t))
> +{
> +    uint64_t field = (reg & field_mask) >> field_shift;
> +
> +    field = sanitise_fn(field) << field_shift;
> +    return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_OuterShareable:
> +        return GIC_BASER_InnerShareable;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        return GIC_BASER_CACHE_RaWb;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        return field;
> +    default:
> +        return GIC_BASER_CACHE_nC;
> +    }
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PROPBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);
> +    return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PENDBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);
> +    return reg;
> +}
> +
> +/*
> + * Allow mapping some parts of guest memory into Xen's VA space to have easy
> + * access to it. This is to allow ITS configuration data to be held in
> + * guest memory and avoid using Xen memory for that.
> + */
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)
   I think this file is not the right place to put this generic function
> +{
> +    mfn_t onepage;
> +    mfn_t *pages;
> +    int i;
> +    void *ptr;
> +
> +    /* TODO: free previous mapping, change prototype? use get-put-put? */
> +
> +    guest_addr &= PAGE_MASK;
> +
> +    if ( nr_pages == 1 )
> +    {
> +        pages = &onepage;
> +    } else
> +    {
> +        pages = xmalloc_array(mfn_t, nr_pages);
> +        if ( !pages )
> +            return NULL;
> +    }
> +
> +    for (i = 0; i < nr_pages; i++)
> +    {
> +        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);

             check return value of this function

> +        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);
> +    }
> +
> +    ptr = vmap(pages, nr_pages);
> +
> +    if ( nr_pages > 1 )
> +        xfree(pages);
> +
> +    return ptr;
> +}
> +
> +void unmap_guest_pages(void *va, int nr_pages)
      Same here. Can this be put in a generic file such as p2m.c?
> +{
> +    paddr_t pa;
> +    unsigned long i;
> +
> +    if ( !va )
> +        return;
> +
> +    va = (void *)((uintptr_t)va & PAGE_MASK);
> +    pa = virt_to_maddr(va);
  can use _pa()
> +
> +    vunmap(va);
> +    for (i = 0; i < nr_pages; i++)
> +        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
> +
> +    return;
> +}
> +
>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>                                            uint32_t gicr_reg,
>                                            register_t r)
>  {
>      struct hsr_dabt dabt = info->dabt;
> +    uint64_t reg;
>
>      switch ( gicr_reg )
>      {
> @@ -375,13 +512,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>      case 0x0050:
>          goto write_reserved;
>
> -    case VREG64(GICR_PROPBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +    case VREG64(GICR_PROPBASER): {
> +        int nr_pages;
> +
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;
> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
> +            return 1;
> +
> +        reg = v->domain->arch.vgic.rdist_propbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_propbaser(reg);
> +        v->domain->arch.vgic.rdist_propbase = reg;
>
> +        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - 8192;
             should be validated against HOST_LPIS?

> +        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
> +        unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);
> +        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
> +                                                         reg & GENMASK(47, 12),
> +                                                         nr_pages);
> +        return 1;
> +    }
>      case VREG64(GICR_PENDBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;

   check on VGIC_V3_LPIS_ENABLED is required
> +       reg = v->arch.vgic.rdist_pendbase;
> +       vgic_reg64_update(&reg, r, info);
> +       reg = sanitize_pendbaser(reg);
> +       v->arch.vgic.rdist_pendbase = reg;
> +
> +        unmap_guest_pages(v->arch.vgic.pendtable, 16);
     why only 16 pages are unmapped?
> +       v->arch.vgic.pendtable = map_guest_pages(v->domain,
> +                                                 reg & GENMASK(47, 12), 16);
> +       return 1;
>
>      case 0x0080:
>          goto write_reserved;
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index b961551..4d9304f 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>          empty->pirq.irq = lpi;
>      }
>
> +    /* Update the enabled status */
> +    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
> +
>      return &empty->pirq;
>  }
>
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index ae8a9de..0cd3500 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -109,6 +109,8 @@ struct arch_domain
>          } *rdist_regions;
>          int nr_regions;                     /* Number of rdist regions */
>          uint32_t rdist_stride;              /* Re-Distributor stride */
> +        uint64_t rdist_propbase;
> +        uint8_t *proptable;
>  #endif
>      } vgic;
>
> @@ -247,7 +249,10 @@ struct arch_vcpu
>
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
> +        uint64_t rdist_pendbase;
> +        unsigned long *pendtable;
>          uint8_t flags;
>          struct list_head pending_lpi_list;
>      } vgic;
> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
> index 1f881c0..3b2e5c0 100644
> --- a/xen/include/asm-arm/gic-its.h
> +++ b/xen/include/asm-arm/gic-its.h
> @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>
>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>  {
> -    return GIC_PRI_IRQ;
> +    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;
> +}
> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
> +{
> +    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
>  }
>
>  #else
> @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>  {
>      return GIC_PRI_IRQ;
>  }
> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
> +{
> +    return false;
> +}
>
>  #endif /* CONFIG_HAS_ITS */
>
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 4e29ba6..2b216cc 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>
>  #undef VGIC_REG_HELPERS
>
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
> +void unmap_guest_pages(void *va, int nr_pages);
> +
>  enum gic_sgi_mode;
>
>  /*
> --
> 2.9.0
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> https://lists.xen.org/xen-devel
Stefano Stabellini Oct. 29, 2016, 12:39 a.m. UTC | #2
On Wed, 28 Sep 2016, Andre Przywara wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.
> 
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  xen/arch/arm/vgic-v3.c        | 189 ++++++++++++++++++++++++++++++++++++++----
>  xen/arch/arm/vgic.c           |   4 +
>  xen/include/asm-arm/domain.h  |   7 +-
>  xen/include/asm-arm/gic-its.h |  10 ++-
>  xen/include/asm-arm/vgic.h    |   3 +
>  5 files changed, 197 insertions(+), 16 deletions(-)
> 
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index e9b6490..8fe8386 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -20,12 +20,14 @@
>  
>  #include <xen/bitops.h>
>  #include <xen/config.h>
> +#include <xen/domain_page.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
>  #include <xen/sizes.h>
> +#include <xen/vmap.h>
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> @@ -228,12 +230,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>          goto read_reserved;
>  
>      case VREG64(GICR_PROPBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +        return 1;
>  
>      case VREG64(GICR_PENDBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
> +        return 1;
>  
>      case 0x0080:
>          goto read_reserved;
> @@ -301,11 +305,6 @@ bad_width:
>      domain_crash_synchronous();
>      return 0;
>  
> -read_as_zero_64:
> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -    *r = 0;
> -    return 1;
> -
>  read_as_zero_32:
>      if ( dabt.size != DABT_WORD ) goto bad_width;
>      *r = 0;
> @@ -330,11 +329,149 @@ read_unknown:
>      return 1;
>  }
>  
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +                                    int field_shift,
> +                                    uint64_t (*sanitise_fn)(uint64_t))
> +{
> +    uint64_t field = (reg & field_mask) >> field_shift;
> +
> +    field = sanitise_fn(field) << field_shift;
> +    return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_OuterShareable:
> +        return GIC_BASER_InnerShareable;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        return GIC_BASER_CACHE_RaWb;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        return field;
> +    default:
> +        return GIC_BASER_CACHE_nC;
> +    }
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PROPBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);
> +    return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PENDBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);
> +    return reg;
> +}
> +
> +/*
> + * Allow mapping some parts of guest memory into Xen's VA space to have easy
> + * access to it. This is to allow ITS configuration data to be held in
> + * guest memory and avoid using Xen memory for that.
> + */
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)

map_guest_pages and unmap_guest_pages are not ARM specific and should
live somewhere in xen/common, maybe xen/common/vmap.c.


> +{
> +    mfn_t onepage;
> +    mfn_t *pages;
> +    int i;
> +    void *ptr;
> +
> +    /* TODO: free previous mapping, change prototype? use get-put-put? */

No need: the caller is already doing that, right?


> +    guest_addr &= PAGE_MASK;
> +
> +    if ( nr_pages == 1 )
> +    {
> +        pages = &onepage;
> +    } else
> +    {
> +        pages = xmalloc_array(mfn_t, nr_pages);
> +        if ( !pages )
> +            return NULL;
> +    }

How often do you think only 1 page will be used? If the answer is not
often, I would get rid of the onepage optimization.


> +    for (i = 0; i < nr_pages; i++)
> +    {
> +        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);
> +        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);

Don't you need to call page_to_mfn (or gfn_to_mfn) to get the mfn?


> +    }
> +
> +    ptr = vmap(pages, nr_pages);

It is possible for vmap to fail and return NULL. Maybe because the guest
intentionally tried to break the hypervisor by passing an array of pages
that ends in an MMIO region. We need to check vmap errors and handle
them.
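
Something along these lines, perhaps (untested sketch, reusing the
variables already in the function; the unwind assumes the page
references above were taken successfully):

    ptr = vmap(pages, nr_pages);
    if ( !ptr )
    {
        /* Roll back the references taken in the loop above. */
        for ( i = 0; i < nr_pages; i++ )
            put_page(mfn_to_page(mfn_x(pages[i])));

        if ( nr_pages > 1 )
            xfree(pages);

        return NULL;
    }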


> +    if ( nr_pages > 1 )
> +        xfree(pages);
> +
> +    return ptr;
> +}
> +
> +void unmap_guest_pages(void *va, int nr_pages)
> +{
> +    paddr_t pa;
> +    unsigned long i;
> +
> +    if ( !va )
> +        return;
> +
> +    va = (void *)((uintptr_t)va & PAGE_MASK);
> +    pa = virt_to_maddr(va);
> +
> +    vunmap(va);
> +    for (i = 0; i < nr_pages; i++)
> +        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
> +
> +    return;
> +}
> +
>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>                                            uint32_t gicr_reg,
>                                            register_t r)
>  {
>      struct hsr_dabt dabt = info->dabt;
> +    uint64_t reg;
>  
>      switch ( gicr_reg )
>      {
> @@ -375,13 +512,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>      case 0x0050:
>          goto write_reserved;
>  
> -    case VREG64(GICR_PROPBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +    case VREG64(GICR_PROPBASER): {
> +        int nr_pages;

unsigned int


> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;

Why not use vgic_reg64_check_access?


> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
> +            return 1;

Why? If there is a good reason for this, it is probably worth writing it
in an in-code comment.


> +        reg = v->domain->arch.vgic.rdist_propbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_propbaser(reg);
> +        v->domain->arch.vgic.rdist_propbase = reg;
>  
> +        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - 8192;
> +        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);

Do we need to set an upper limit on nr_pages? We don't really want to
allow (2^0x1f)/4096 pages, right?
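
For instance (sketch only; VGIC_LPI_IDBITS_MAX is a made-up name for
whatever limit we derive from the emulated GICD_TYPER.IDbits):

    /* Sketch: clamp the guest-provided IDbits before sizing the mapping. */
    unsigned int idbits = min_t(unsigned int, (reg & 0x1f) + 1,
                                VGIC_LPI_IDBITS_MAX /* hypothetical limit */);

    nr_pages = DIV_ROUND_UP(BIT(idbits) - 8192, PAGE_SIZE);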


> +        unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);
> +        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
> +                                                         reg & GENMASK(47, 12),
> +                                                         nr_pages);

I am pretty sure I am reading the right spec now and it should be
GENMASK(51, 12). Also, don't we need to sanitize the table too before
using it?


> +        return 1;
> +    }
>      case VREG64(GICR_PENDBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;

Why not use vgic_reg64_check_access?


> +	reg = v->arch.vgic.rdist_pendbase;
> +	vgic_reg64_update(&reg, r, info);
> +	reg = sanitize_pendbaser(reg);
> +	v->arch.vgic.rdist_pendbase = reg;
> +        unmap_guest_pages(v->arch.vgic.pendtable, 16);
> +	v->arch.vgic.pendtable = map_guest_pages(v->domain,
> +                                                 reg & GENMASK(47, 12), 16);

Indentation.
We need to make sure the vmap mapping is correct. Also we need to
sanitize this table too before using it.


> +	return 1;
>  
>      case 0x0080:
>          goto write_reserved;
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index b961551..4d9304f 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>          empty->pirq.irq = lpi;
>      }
>  
> +    /* Update the enabled status */
> +    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);

Where is the GIC_IRQ_GUEST_ENABLED unset?


>      return &empty->pirq;
>  }
>  
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index ae8a9de..0cd3500 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -109,6 +109,8 @@ struct arch_domain
>          } *rdist_regions;
>          int nr_regions;                     /* Number of rdist regions */
>          uint32_t rdist_stride;              /* Re-Distributor stride */
> +        uint64_t rdist_propbase;
> +        uint8_t *proptable;

Do we need to keep both rdist_propbase and proptable? It is easy to go
from proptable to rdist_propbase and I guess it is not an operation that
is done often? If so, we could save some memory and remove it.


>  #endif
>      } vgic;
>  
> @@ -247,7 +249,10 @@ struct arch_vcpu
>  
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
> +        uint64_t rdist_pendbase;
> +        unsigned long *pendtable;

Same here.


>          uint8_t flags;
>          struct list_head pending_lpi_list;
>      } vgic;
> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
> index 1f881c0..3b2e5c0 100644
> --- a/xen/include/asm-arm/gic-its.h
> +++ b/xen/include/asm-arm/gic-its.h
> @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>  
>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>  {
> -    return GIC_PRI_IRQ;
> +    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;

Please #define 0xfc. Do we need to check for lpi overflows? As in lpi
numbers larger than proptable size?
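
For illustration, something along these lines (sketch; LPI_PROP_PRIO_MASK
and the nr_lpis field are made-up names, not existing code):

    #define LPI_PROP_PRIO_MASK  0xfc

    static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
    {
        /* Hypothetical overflow check against the advertised number of LPIs. */
        if ( lpi - 8192 >= d->arch.vgic.nr_lpis )
            return GIC_PRI_IRQ;

        return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_PRIO_MASK;
    }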


> +}
> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
> +{
> +    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
>  }
>  
>  #else
> @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>  {
>      return GIC_PRI_IRQ;
>  }
> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
> +{
> +    return false;
> +}
>  
>  #endif /* CONFIG_HAS_ITS */
>  
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 4e29ba6..2b216cc 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>  
>  #undef VGIC_REG_HELPERS
>  
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
> +void unmap_guest_pages(void *va, int nr_pages);
> +
>  enum gic_sgi_mode;
>  
>  /*
Julien Grall Nov. 2, 2016, 5:18 p.m. UTC | #3
Hi Andre,

On 28/09/16 19:24, Andre Przywara wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  xen/arch/arm/vgic-v3.c        | 189 ++++++++++++++++++++++++++++++++++++++----
>  xen/arch/arm/vgic.c           |   4 +
>  xen/include/asm-arm/domain.h  |   7 +-
>  xen/include/asm-arm/gic-its.h |  10 ++-
>  xen/include/asm-arm/vgic.h    |   3 +
>  5 files changed, 197 insertions(+), 16 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index e9b6490..8fe8386 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -20,12 +20,14 @@
>
>  #include <xen/bitops.h>
>  #include <xen/config.h>
> +#include <xen/domain_page.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
>  #include <xen/sizes.h>
> +#include <xen/vmap.h>
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> @@ -228,12 +230,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>          goto read_reserved;
>
>      case VREG64(GICR_PROPBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +        return 1;
>
>      case VREG64(GICR_PENDBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);

The PTZ field reads as 0.

> +        return 1;
>
>      case 0x0080:
>          goto read_reserved;
> @@ -301,11 +305,6 @@ bad_width:
>      domain_crash_synchronous();
>      return 0;
>
> -read_as_zero_64:
> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -    *r = 0;
> -    return 1;
> -
>  read_as_zero_32:
>      if ( dabt.size != DABT_WORD ) goto bad_width;
>      *r = 0;
> @@ -330,11 +329,149 @@ read_unknown:
>      return 1;
>  }
>
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +                                    int field_shift,
> +                                    uint64_t (*sanitise_fn)(uint64_t))
> +{
> +    uint64_t field = (reg & field_mask) >> field_shift;
> +
> +    field = sanitise_fn(field) << field_shift;

Newline here please.

> +    return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_OuterShareable:
> +        return GIC_BASER_InnerShareable;
> +    default:
> +        return field;
> +    }
> +}

I am not sure I understand why we need to sanitise the value here. From 
my understanding of the spec (see 8.11.18 in IHI 0069C) we should 
support any shareability/cacheability, correct?

> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        return GIC_BASER_CACHE_RaWb;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        return field;
> +    default:
> +        return GIC_BASER_CACHE_nC;
> +    }
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PROPBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);

Why do you mask bits 51:48? There is no restriction in Xen on the size 
of the IPA (though 52-bit support is part of ARMv8.2), so we should 
avoid open-coding masks everywhere in the code. Otherwise it will be 
more painful to extend the number of supported bits.

FWIW, all the p2m code is checking whether the IPA is supported.
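
As a rough sketch of what that could look like (max_ipa_bits is a
stand-in for whatever the p2m code exposes, not an existing symbol):

    /*
     * Sketch: clear everything above the supported IPA width instead of
     * hard-coding bits 51:48.
     */
    if ( max_ipa_bits < 52 )
        reg &= ~GENMASK(51, max_ipa_bits);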

> +    return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PENDBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);

Ditto.

> +    return reg;
> +}
> +
> +/*
> + * Allow mapping some parts of guest memory into Xen's VA space to have easy
> + * access to it. This is to allow ITS configuration data to be held in
> + * guest memory and avoid using Xen memory for that.
> + */
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)

Please pass a gfn_t rather than paddr_t.

> +{
> +    mfn_t onepage;
> +    mfn_t *pages;

s/pages/mfns/

> +    int i;
> +    void *ptr;
> +
> +    /* TODO: free previous mapping, change prototype? use get-put-put? */
> +
> +    guest_addr &= PAGE_MASK;
> +
> +    if ( nr_pages == 1 )
> +    {
> +        pages = &onepage;
> +    } else
> +    {
> +        pages = xmalloc_array(mfn_t, nr_pages);
> +        if ( !pages )
> +            return NULL;
> +    }
> +
> +    for (i = 0; i < nr_pages; i++)
> +    {
> +        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);

get_page_from_gfn can fail if you try to get a page of memory that is 
not backed by a RAM region. Also, get_page_from_gfn will work on foreign 
mappings, and we don't want the guest using foreign memory (e.g. memory 
belonging to another domain) for the ITS internal memory.

Also, please pay attention to error paths whilst you write the code. It 
is a pain to handle them after the code has been written. I will try to 
point them out when I spot them.
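
Roughly, the loop could look something like this (untested sketch;
whether p2m_is_ram() is the right type check here is open for
discussion):

    for ( i = 0; i < nr_pages; i++ )
    {
        p2m_type_t t;
        struct page_info *page;

        page = get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, &t,
                                 P2M_ALLOC);
        if ( !page || !p2m_is_ram(t) )
        {
            /* Drop whatever references we took so far and bail out. */
            if ( page )
                put_page(page);
            while ( i-- )
                put_page(mfn_to_page(mfn_x(pages[i])));
            if ( nr_pages > 1 )
                xfree(pages);
            return NULL;
        }

        /* Use the real MFN rather than assuming a 1:1 mapping. */
        pages[i] = _mfn(page_to_mfn(page));
    }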

> +        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);

You cannot assume a 1:1 mapping between the IPA and the PA. Please use 
the struct page_info returned by get_page_from_gfn

> +    }
> +
> +    ptr = vmap(pages, nr_pages);

I am not a big fan of the vmap solution for various reasons:
	- the VMAP area is small (only 1GB), so it will not scale (you seem to 
use it to map pretty much all memory provisioned for the ITS)
	- writing to a register cannot fail, so how do you cope with that?

I think the best approach here is to use a similar approach as 
copy_*_guests helpers but dealing with IPA rather than guest VA.
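
Something in that direction, as a very rough sketch (the name, the
return values and the single-page granularity are just for
illustration):

    /* Copy a buffer to a guest physical address, one page at a time. */
    static int vgic_copy_to_guest_phys(struct domain *d, paddr_t ipa,
                                       const void *src, size_t len)
    {
        const char *buf = src;

        while ( len )
        {
            unsigned int offs = ipa & ~PAGE_MASK;
            size_t size = min(len, (size_t)PAGE_SIZE - offs);
            struct page_info *page;
            char *p;

            page = get_page_from_gfn(d, paddr_to_pfn(ipa), NULL, P2M_ALLOC);
            if ( !page )
                return -EINVAL;

            p = __map_domain_page(page);
            memcpy(p + offs, buf, size);
            unmap_domain_page(p);
            put_page(page);

            buf += size;
            ipa += size;
            len -= size;
        }

        return 0;
    }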

> +
> +    if ( nr_pages > 1 )
> +        xfree(pages);
> +
> +    return ptr;
> +}
> +
> +void unmap_guest_pages(void *va, int nr_pages)
> +{
> +    paddr_t pa;
> +    unsigned long i;
> +
> +    if ( !va )
> +        return;
> +
> +    va = (void *)((uintptr_t)va & PAGE_MASK);
> +    pa = virt_to_maddr(va);
> +
> +    vunmap(va);
> +    for (i = 0; i < nr_pages; i++)
> +        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
> +
> +    return;
> +}
> +
>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>                                            uint32_t gicr_reg,
>                                            register_t r)
>  {
>      struct hsr_dabt dabt = info->dabt;
> +    uint64_t reg;
>
>      switch ( gicr_reg )
>      {
> @@ -375,13 +512,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>      case 0x0050:
>          goto write_reserved;
>
> -    case VREG64(GICR_PROPBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +    case VREG64(GICR_PROPBASER): {

Coding style: the { should be on its own line.

> +        int nr_pages;

unsigned int

> +
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;

Newline here for clarity. Also please use vgic_reg64_check_access rather 
than open-coding it.

> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )

From my understanding, VGIC_V3_LPIS_ENABLED is set when the guest 
enables LPIs on this re-distributor. However, this check is not safe as 
GICR_CTLR.Enable_LPIs may be set concurrently (the re-distributors are 
accessible from any vCPU).

Also, when the ITS is not available we should avoid handling the 
register (i.e. treat it as write-ignore). My rationale here is that we 
should limit the amount of emulation exposed to the guest whenever 
possible.

> +            return 1;

I think we should at least print a warning, as writing to GICR_PROPBASER 
when GICR_CTLR.Enable_LPIs is set is unpredictable. IMHO, I would even 
crash the guest.
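
I.e. something along these lines (sketch):

    if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
    {
        gprintk(XENLOG_WARNING,
                "write to GICR_PROPBASER with LPIs enabled is UNPREDICTABLE\n");
        domain_crash_synchronous();
        return 0;
    }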

The code below likely needs locking, as the property table is common to 
all re-distributors and hence could be modified concurrently. Also, I 
would like to see a comment on top of the emulation of GICR_TYPER to 
mention that all re-distributors share the same common property table 
(GICR_TYPER.CommonLPIAff = 0).
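
E.g. roughly (sketch; whether the existing per-domain VGIC lock is the
right one to use here is an open question):

    spin_lock(&v->domain->arch.vgic.lock);

    reg = v->domain->arch.vgic.rdist_propbase;
    vgic_reg64_update(&reg, r, info);
    reg = sanitize_propbaser(reg);
    v->domain->arch.vgic.rdist_propbase = reg;

    spin_unlock(&v->domain->arch.vgic.lock);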

> +
> +        reg = v->domain->arch.vgic.rdist_propbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_propbaser(reg);
> +        v->domain->arch.vgic.rdist_propbase = reg;
>
> +        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - 8192;

The spec (see 8.11.19) says: "If the value of this field is larger than 
the value of GICD_TYPER.IDbits, the GICD_TYPER.IDbits value applies". We 
don't want to map more than necessary.

> +        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
> +        unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);

This looks wrong to me. A guest could specify a size different from the 
previous write.

> +        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
> +                                                         reg & GENMASK(47, 12),
> +                                                         nr_pages);
> +        return 1;
> +    }
>      case VREG64(GICR_PENDBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;

Newline + vgic_reg64_check_access

Also, you don't check whether the LPIs have been enabled here.

All my comments above stand. Furthermore, the code is not correctly 
indented (you are using hard tabs).

> +	reg = v->arch.vgic.rdist_pendbase;
> +	vgic_reg64_update(&reg, r, info);
> +	reg = sanitize_pendbaser(reg);
> +	v->arch.vgic.rdist_pendbase = reg;
> +
> +        unmap_guest_pages(v->arch.vgic.pendtable, 16);
> +	v->arch.vgic.pendtable = map_guest_pages(v->domain,
> +                                                 reg & GENMASK(47, 12), 16);

The pending table is never touched by Xen, so I would avoid mapping it.

> +	return 1;
>
>      case 0x0080:
>          goto write_reserved;
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index b961551..4d9304f 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>          empty->pirq.irq = lpi;
>      }
>
> +    /* Update the enabled status */
> +    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
> +
>      return &empty->pirq;
>  }
>
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index ae8a9de..0cd3500 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -109,6 +109,8 @@ struct arch_domain
>          } *rdist_regions;
>          int nr_regions;                     /* Number of rdist regions */
>          uint32_t rdist_stride;              /* Re-Distributor stride */
> +        uint64_t rdist_propbase;
> +        uint8_t *proptable;
>  #endif
>      } vgic;
>
> @@ -247,7 +249,10 @@ struct arch_vcpu
>
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */

Please avoid spurious changes. In Xen we don't require all the constants 
to be aligned. This also makes it harder to go through the changes.

> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)

Please document the purpose of this bit.

> +        uint64_t rdist_pendbase;
> +        unsigned long *pendtable;
>          uint8_t flags;
>          struct list_head pending_lpi_list;
>      } vgic;
> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
> index 1f881c0..3b2e5c0 100644
> --- a/xen/include/asm-arm/gic-its.h
> +++ b/xen/include/asm-arm/gic-its.h
> @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>
>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)

s/lpi/vlpi/ to make clear this is a function dealing with virtual LPIs.

>  {
> -    return GIC_PRI_IRQ;
> +    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;

I think this is the best place to ask this question: I don't see any 
code within this series to check that the guest effectively initialized 
proptable and that the size is correct (you don't check that the guest 
provided enough memory compared to the number of vLPIs advertised).

FWIW, I already made those comments back when Vijay sent his patch 
series. It might be worthwhile to look at what he did regarding all the 
sanity checks.

> +}

Newline here for clarity.

> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)

Ditto for the naming.

> +{
> +    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
>  }
>
>  #else
> @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>  {
>      return GIC_PRI_IRQ;
>  }

Newline here for clarity.

> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
> +{
> +    return false;
> +}
>
>  #endif /* CONFIG_HAS_ITS */
>
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 4e29ba6..2b216cc 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>
>  #undef VGIC_REG_HELPERS
>
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
> +void unmap_guest_pages(void *va, int nr_pages);
> +
>  enum gic_sgi_mode;
>
>  /*
>

Regards,
Stefano Stabellini Nov. 2, 2016, 5:41 p.m. UTC | #4
On Wed, 2 Nov 2016, Julien Grall wrote:
> > +    }
> > +
> > +    ptr = vmap(pages, nr_pages);
> 
> I am not a big fan of the vmap solution for various reasons:
> 	- the VMAP area is small (only 1GB), so it will not scale (you seem
> to use it to map pretty much all memory provisioned for the ITS)
> 	- writing to a register cannot fail, so how do you cope with that?
> 
> I think the best approach here is to use a similar approach as
> copy_*_guests helpers but dealing with IPA rather than guest VA.

I don't like the idea of using the vmap for this either, but the problem
with the copy_*_guest approach is that it only maps one page at a time.
It is unable to map multiple pages contiguously, which seems to be
required here.
Julien Grall Nov. 2, 2016, 6:03 p.m. UTC | #5
Hi Stefano,

On 02/11/16 17:41, Stefano Stabellini wrote:
> On Wed, 2 Nov 2016, Julien Grall wrote:
>>> +    }
>>> +
>>> +    ptr = vmap(pages, nr_pages);
>>
>> I am not a big fan of the vmap solution for various reasons:
>> 	- the VMAP area is small (only 1GB), so it will not scale (you seem
>> to use it to map pretty much all memory provisioned for the ITS)
>> 	- writing to a register cannot fail, so how do you cope with that?
>>
>> I think the best approach here is to use a similar approach as
>> copy_*_guests helpers but dealing with IPA rather than guest VA.
>
> I don't like the idea of using the vmap for this either, but the problem
> with the copy_*_guest approach is that it only maps one page at a time.
> It is unable to map multiple pages contiguously, which seems to be
> required here.

We will get into trouble very quickly with the vmap solution. The memory 
provisioned by the ITS can be quite big. Each GITS_BASER can hold up to 
16MB (if I computed correctly) of memory.

The question is why do we need to map the pages contiguously? Can we do 
it in a different way?

Regards,
Stefano Stabellini Nov. 2, 2016, 6:09 p.m. UTC | #6
On Wed, 2 Nov 2016, Julien Grall wrote:
> Hi Stefano,
> 
> On 02/11/16 17:41, Stefano Stabellini wrote:
> > On Wed, 2 Nov 2016, Julien Grall wrote:
> > > > +    }
> > > > +
> > > > +    ptr = vmap(pages, nr_pages);
> > > 
> > > I am not a big fan of the vmap solution for various reasons:
> > > 	- the VMAP area is small (only 1GB), so it will not scale (you seem
> > > to use it to map pretty much all memory provisioned for the ITS)
> > > 	- writing to a register cannot fail, so how do you cope with that?
> > > 
> > > I think the best approach here is to use a similar approach as
> > > copy_*_guests helpers but dealing with IPA rather than guest VA.
> > 
> > I don't like the idea of using the vmap for this either, but the problem
> > with the copy_*_guest approach is that it only maps one page at a time.
> > It is unable to map multiple pages contiguously, which seems to be
> > required here.
> 
> We will get into trouble very quickly with the vmap solution. The memory
> provisioned by the ITS can be quite big. Each GITS_BASER can hold up to 16MB
> (if I computed correctly) of memory.
> 
> The question is why do we need to map the pages contiguously? Can we do it
> in a different way?
 
I agree with Julien: if we can get away with mapping one page at a time
without performance penalties, we should do it. Keep in mind that on
ARM64 Xen doesn't actually need to map anything because the whole
physical memory is already mapped.
Andre Przywara Nov. 3, 2016, 8:21 p.m. UTC | #7
Hi,

On 24/10/16 16:32, Vijay Kilari wrote:
> On Wed, Sep 28, 2016 at 11:54 PM, Andre Przywara <andre.przywara@arm.com> wrote:
>> Allow a guest to provide the address and size for the memory regions
>> it has reserved for the GICv3 pending and property tables.
>> We sanitise the various fields of the respective redistributor
>> registers and map those pages into Xen's address space to have easy
>> access.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>> ---
>>  xen/arch/arm/vgic-v3.c        | 189 ++++++++++++++++++++++++++++++++++++++----
>>  xen/arch/arm/vgic.c           |   4 +
>>  xen/include/asm-arm/domain.h  |   7 +-
>>  xen/include/asm-arm/gic-its.h |  10 ++-
>>  xen/include/asm-arm/vgic.h    |   3 +
>>  5 files changed, 197 insertions(+), 16 deletions(-)
>>
>> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
>> index e9b6490..8fe8386 100644
>> --- a/xen/arch/arm/vgic-v3.c
>> +++ b/xen/arch/arm/vgic-v3.c
>> @@ -20,12 +20,14 @@
>>
>>  #include <xen/bitops.h>
>>  #include <xen/config.h>
>> +#include <xen/domain_page.h>
>>  #include <xen/lib.h>
>>  #include <xen/init.h>
>>  #include <xen/softirq.h>
>>  #include <xen/irq.h>
>>  #include <xen/sched.h>
>>  #include <xen/sizes.h>
>> +#include <xen/vmap.h>
>>  #include <asm/current.h>
>>  #include <asm/mmio.h>
>>  #include <asm/gic_v3_defs.h>
>> @@ -228,12 +230,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>>          goto read_reserved;
>>
>>      case VREG64(GICR_PROPBASER):
>> -        /* LPI's not implemented */
>> -        goto read_as_zero_64;
>> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
>> +        return 1;
>>
>>      case VREG64(GICR_PENDBASER):
>> -        /* LPI's not implemented */
>> -        goto read_as_zero_64;
>> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
>> +        return 1;
>>
>>      case 0x0080:
>>          goto read_reserved;
>> @@ -301,11 +305,6 @@ bad_width:
>>      domain_crash_synchronous();
>>      return 0;
>>
>> -read_as_zero_64:
>> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> -    *r = 0;
>> -    return 1;
>> -
>>  read_as_zero_32:
>>      if ( dabt.size != DABT_WORD ) goto bad_width;
>>      *r = 0;
>> @@ -330,11 +329,149 @@ read_unknown:
>>      return 1;
>>  }
>>
>> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
>> +                                    int field_shift,
>> +                                    uint64_t (*sanitise_fn)(uint64_t))
>> +{
>> +    uint64_t field = (reg & field_mask) >> field_shift;
>> +
>> +    field = sanitise_fn(field) << field_shift;
>> +    return (reg & ~field_mask) | field;
>> +}
>> +
>> +/* We want to avoid outer shareable. */
>> +static uint64_t vgic_sanitise_shareability(uint64_t field)
>> +{
>> +    switch (field) {
>> +    case GIC_BASER_OuterShareable:
>> +        return GIC_BASER_InnerShareable;
>> +    default:
>> +        return field;
>> +    }
>> +}
>> +
>> +/* Avoid any inner non-cacheable mapping. */
>> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
>> +{
>> +    switch (field) {
>> +    case GIC_BASER_CACHE_nCnB:
>> +    case GIC_BASER_CACHE_nC:
>> +        return GIC_BASER_CACHE_RaWb;
>> +    default:
>> +        return field;
>> +    }
>> +}
>> +
>> +/* Non-cacheable or same-as-inner are OK. */
>> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
>> +{
>> +    switch (field) {
>> +    case GIC_BASER_CACHE_SameAsInner:
>> +    case GIC_BASER_CACHE_nC:
>> +        return field;
>> +    default:
>> +        return GIC_BASER_CACHE_nC;
>> +    }
>> +}
>> +
>> +static uint64_t sanitize_propbaser(uint64_t reg)
>> +{
>> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
>> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
>> +                              vgic_sanitise_shareability);
>> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
>> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_inner_cacheability);
>> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
>> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_outer_cacheability);
>> +
>> +    reg &= ~PROPBASER_RES0_MASK;
>> +    reg &= ~GENMASK(51, 48);
>> +    return reg;
>> +}
>> +
>> +static uint64_t sanitize_pendbaser(uint64_t reg)
>> +{
>> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
>> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
>> +                              vgic_sanitise_shareability);
>> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
>> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_inner_cacheability);
>> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
>> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_outer_cacheability);
>> +
>> +    reg &= ~PENDBASER_RES0_MASK;
>> +    reg &= ~GENMASK(51, 48);
>> +    return reg;
>> +}
>> +
>> +/*
>> + * Allow mapping some parts of guest memory into Xen's VA space to have easy
>> + * access to it. This is to allow ITS configuration data to be held in
>> + * guest memory and avoid using Xen memory for that.
>> + */
>> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)
>    I think this file is not the right place to put this generic function

Yeah, possibly.

>> +{
>> +    mfn_t onepage;
>> +    mfn_t *pages;
>> +    int i;
>> +    void *ptr;
>> +
>> +    /* TODO: free previous mapping, change prototype? use get-put-put? */
>> +
>> +    guest_addr &= PAGE_MASK;
>> +
>> +    if ( nr_pages == 1 )
>> +    {
>> +        pages = &onepage;
>> +    } else
>> +    {
>> +        pages = xmalloc_array(mfn_t, nr_pages);
>> +        if ( !pages )
>> +            return NULL;
>> +    }
>> +
>> +    for (i = 0; i < nr_pages; i++)
>> +    {
>> +        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);
> 
>              check return value of this function

Yes.

>> +        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);
>> +    }
>> +
>> +    ptr = vmap(pages, nr_pages);
>> +
>> +    if ( nr_pages > 1 )
>> +        xfree(pages);
>> +
>> +    return ptr;
>> +}
>> +
>> +void unmap_guest_pages(void *va, int nr_pages)
>       Same here. Can this be put in a generic file such as p2m.c?
>> +{
>> +    paddr_t pa;
>> +    unsigned long i;
>> +
>> +    if ( !va )
>> +        return;
>> +
>> +    va = (void *)((uintptr_t)va & PAGE_MASK);
>> +    pa = virt_to_maddr(va);
>   can use _pa()

Do you mean __pa()? Which is defined to be exactly virt_to_maddr()?
I prefer the more verbose version, which is more readable, IMHO.

>> +
>> +    vunmap(va);
>> +    for (i = 0; i < nr_pages; i++)
>> +        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
>> +
>> +    return;
>> +}
>> +
>>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>>                                            uint32_t gicr_reg,
>>                                            register_t r)
>>  {
>>      struct hsr_dabt dabt = info->dabt;
>> +    uint64_t reg;
>>
>>      switch ( gicr_reg )
>>      {
>> @@ -375,13 +512,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>>      case 0x0050:
>>          goto write_reserved;
>>
>> -    case VREG64(GICR_PROPBASER):
>> -        /* LPI is not implemented */
>> -        goto write_ignore_64;
>> +    case VREG64(GICR_PROPBASER): {
>> +        int nr_pages;
>> +
>> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;
>> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
>> +            return 1;
>> +
>> +        reg = v->domain->arch.vgic.rdist_propbase;
>> +        vgic_reg64_update(&reg, r, info);
>> +        reg = sanitize_propbaser(reg);
>> +        v->domain->arch.vgic.rdist_propbase = reg;
>>
>> +        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - 8192;
>              should be validated against HOST_LPIS?

I don't think so. The actual LPI numbers are totally independent between
host and Dom0.
So why and how should this be matched?

>> +        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
>> +        unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);
>> +        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
>> +                                                         reg & GENMASK(47, 12),
>> +                                                         nr_pages);
>> +        return 1;
>> +    }
>>      case VREG64(GICR_PENDBASER):
>> -        /* LPI is not implemented */
>> -        goto write_ignore_64;
>> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;
> 
>    check on VGIC_V3_LPIS_ENABLED is required

Right, forgot this.

>> +       reg = v->arch.vgic.rdist_pendbase;
>> +       vgic_reg64_update(&reg, r, info);
>> +       reg = sanitize_pendbaser(reg);
>> +       v->arch.vgic.rdist_pendbase = reg;
>> +
>> +        unmap_guest_pages(v->arch.vgic.pendtable, 16);
>      why only 16 pages are unmapped?

Well, it matches the allocation below, but I agree that this should
match the advertised number of LPIs in GICD_TYPER.

Cheers,
Andre.

>> +       v->arch.vgic.pendtable = map_guest_pages(v->domain,
>> +                                                 reg & GENMASK(47, 12), 16);
>> +       return 1;
>>
>>      case 0x0080:
>>          goto write_reserved;
>> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>> index b961551..4d9304f 100644
>> --- a/xen/arch/arm/vgic.c
>> +++ b/xen/arch/arm/vgic.c
>> @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>>          empty->pirq.irq = lpi;
>>      }
>>
>> +    /* Update the enabled status */
>> +    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
>> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
>> +
>>      return &empty->pirq;
>>  }
>>
>> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
>> index ae8a9de..0cd3500 100644
>> --- a/xen/include/asm-arm/domain.h
>> +++ b/xen/include/asm-arm/domain.h
>> @@ -109,6 +109,8 @@ struct arch_domain
>>          } *rdist_regions;
>>          int nr_regions;                     /* Number of rdist regions */
>>          uint32_t rdist_stride;              /* Re-Distributor stride */
>> +        uint64_t rdist_propbase;
>> +        uint8_t *proptable;
>>  #endif
>>      } vgic;
>>
>> @@ -247,7 +249,10 @@ struct arch_vcpu
>>
>>          /* GICv3: redistributor base and flags for this vCPU */
>>          paddr_t rdist_base;
>> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
>> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
>> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
>> +        uint64_t rdist_pendbase;
>> +        unsigned long *pendtable;
>>          uint8_t flags;
>>          struct list_head pending_lpi_list;
>>      } vgic;
>> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
>> index 1f881c0..3b2e5c0 100644
>> --- a/xen/include/asm-arm/gic-its.h
>> +++ b/xen/include/asm-arm/gic-its.h
>> @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>>
>>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>>  {
>> -    return GIC_PRI_IRQ;
>> +    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;
>> +}
>> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
>> +{
>> +    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
>>  }
>>
>>  #else
>> @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>>  {
>>      return GIC_PRI_IRQ;
>>  }
>> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
>> +{
>> +    return false;
>> +}
>>
>>  #endif /* CONFIG_HAS_ITS */
>>
>> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
>> index 4e29ba6..2b216cc 100644
>> --- a/xen/include/asm-arm/vgic.h
>> +++ b/xen/include/asm-arm/vgic.h
>> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>>
>>  #undef VGIC_REG_HELPERS
>>
>> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
>> +void unmap_guest_pages(void *va, int nr_pages);
>> +
>>  enum gic_sgi_mode;
>>
>>  /*
>> --
>> 2.9.0
>>
>>
>> _______________________________________________
>> Xen-devel mailing list
>> Xen-devel@lists.xen.org
>> https://lists.xen.org/xen-devel
>
Julien Grall Nov. 4, 2016, 11:53 a.m. UTC | #8
Hi,

On 03/11/16 20:21, Andre Przywara wrote:
> On 24/10/16 16:32, Vijay Kilari wrote:
>> On Wed, Sep 28, 2016 at 11:54 PM, Andre Przywara <andre.przywara@arm.com> wrote:
>>> +    va = (void *)((uintptr_t)va & PAGE_MASK);
>>> +    pa = virt_to_maddr(va);
>>   can use _pa()
>
> Do you mean __pa()? Which is defined to be exactly virt_to_maddr()?
> I prefer the more verbose version, which is more readable, IMHO.

FWIW, __pa tends to be more used than virt_to_maddr within the source base.
Andre Przywara Jan. 31, 2017, 9:10 a.m. UTC | #9
Hi Julien,

(forgot to hit the Send button yesterday ...)

....

On 02/11/16 17:18, Julien Grall wrote:
> Hi Andre,
> 
> On 28/09/16 19:24, Andre Przywara wrote:
>> Allow a guest to provide the address and size for the memory regions
>> it has reserved for the GICv3 pending and property tables.
>> We sanitise the various fields of the respective redistributor
>> registers and map those pages into Xen's address space to have easy
>> access.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>> ---
>>  xen/arch/arm/vgic-v3.c        | 189
>> ++++++++++++++++++++++++++++++++++++++----
>>  xen/arch/arm/vgic.c           |   4 +
>>  xen/include/asm-arm/domain.h  |   7 +-
>>  xen/include/asm-arm/gic-its.h |  10 ++-
>>  xen/include/asm-arm/vgic.h    |   3 +
>>  5 files changed, 197 insertions(+), 16 deletions(-)
>>
>> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
>> index e9b6490..8fe8386 100644
>> --- a/xen/arch/arm/vgic-v3.c
>> +++ b/xen/arch/arm/vgic-v3.c
>> @@ -20,12 +20,14 @@
>>
>>  #include <xen/bitops.h>
>>  #include <xen/config.h>
>> +#include <xen/domain_page.h>
>>  #include <xen/lib.h>
>>  #include <xen/init.h>
>>  #include <xen/softirq.h>
>>  #include <xen/irq.h>
>>  #include <xen/sched.h>
>>  #include <xen/sizes.h>
>> +#include <xen/vmap.h>
>>  #include <asm/current.h>
>>  #include <asm/mmio.h>
>>  #include <asm/gic_v3_defs.h>
>> @@ -228,12 +230,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct
>> vcpu *v, mmio_info_t *info,
>>          goto read_reserved;
>>
>>      case VREG64(GICR_PROPBASER):
>> -        /* LPI's not implemented */
>> -        goto read_as_zero_64;
>> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase,
>> info);
>> +        return 1;
>>
>>      case VREG64(GICR_PENDBASER):
>> -        /* LPI's not implemented */
>> -        goto read_as_zero_64;
>> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
> 
> The field PTZ read as 0.
> 
>> +        return 1;
>>
>>      case 0x0080:
>>          goto read_reserved;
>> @@ -301,11 +305,6 @@ bad_width:
>>      domain_crash_synchronous();
>>      return 0;
>>
>> -read_as_zero_64:
>> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> -    *r = 0;
>> -    return 1;
>> -
>>  read_as_zero_32:
>>      if ( dabt.size != DABT_WORD ) goto bad_width;
>>      *r = 0;
>> @@ -330,11 +329,149 @@ read_unknown:
>>      return 1;
>>  }
>>
>> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
>> +                                    int field_shift,
>> +                                    uint64_t (*sanitise_fn)(uint64_t))
>> +{
>> +    uint64_t field = (reg & field_mask) >> field_shift;
>> +
>> +    field = sanitise_fn(field) << field_shift;
> 
> Newline here please.
> 
>> +    return (reg & ~field_mask) | field;
>> +}
>> +
>> +/* We want to avoid outer shareable. */
>> +static uint64_t vgic_sanitise_shareability(uint64_t field)
>> +{
>> +    switch (field) {
>> +    case GIC_BASER_OuterShareable:
>> +        return GIC_BASER_InnerShareable;
>> +    default:
>> +        return field;
>> +    }
>> +}
> 
> I am not sure I understand why we need to sanitise the value here. From
> my understanding of the spec (see 8.11.18 in IHI 0069C) we should
> support any shareability/cacheability, correct?

No, actually an ITS is free to support only _one_ of those attributes,
up to the point where it is read-only:

"It is IMPLEMENTATION DEFINED whether this field has a fixed value or
can be programmed by software. Implementing this field with a fixed
value is deprecated."

So we support more than one value, but refuse any really not useful
ones. This goes in line with the KVM implementation.

For the rest of the comments regarding the memory tables setup:
I effectively rewrote this in the new series, so I think the majority of
the comments don't apply anymore, hopefully the rewrite actually fixed
the issues you mentioned. So I refrain from any comments now and look
forward to a review of the new approach ;-)

Cheers,
Andre.

>> +
>> +/* Avoid any inner non-cacheable mapping. */
>> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
>> +{
>> +    switch (field) {
>> +    case GIC_BASER_CACHE_nCnB:
>> +    case GIC_BASER_CACHE_nC:
>> +        return GIC_BASER_CACHE_RaWb;
>> +    default:
>> +        return field;
>> +    }
>> +}
>> +
>> +/* Non-cacheable or same-as-inner are OK. */
>> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
>> +{
>> +    switch (field) {
>> +    case GIC_BASER_CACHE_SameAsInner:
>> +    case GIC_BASER_CACHE_nC:
>> +        return field;
>> +    default:
>> +        return GIC_BASER_CACHE_nC;
>> +    }
>> +}
>> +
>> +static uint64_t sanitize_propbaser(uint64_t reg)
>> +{
>> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
>> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
>> +                              vgic_sanitise_shareability);
>> +    reg = vgic_sanitise_field(reg,
>> GICR_PROPBASER_INNER_CACHEABILITY_MASK,
>> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_inner_cacheability);
>> +    reg = vgic_sanitise_field(reg,
>> GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
>> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_outer_cacheability);
>> +
>> +    reg &= ~PROPBASER_RES0_MASK;
>> +    reg &= ~GENMASK(51, 48);
> 
> Why do you mask bits 51:48? There is no restriction in Xen on the
> size of the IPA (though 52-bit support is part of ARMv8.2), so we
> should avoid open-coding masks everywhere in the code. Otherwise it will
> be more painful to extend the number of bits supported.
> 
> FWIW, all the p2m code is checking whether the IPA is supported.
> 
>> +    return reg;
>> +}
>> +
>> +static uint64_t sanitize_pendbaser(uint64_t reg)
>> +{
>> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
>> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
>> +                              vgic_sanitise_shareability);
>> +    reg = vgic_sanitise_field(reg,
>> GICR_PENDBASER_INNER_CACHEABILITY_MASK,
>> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_inner_cacheability);
>> +    reg = vgic_sanitise_field(reg,
>> GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
>> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
>> +                              vgic_sanitise_outer_cacheability);
>> +
>> +    reg &= ~PENDBASER_RES0_MASK;
>> +    reg &= ~GENMASK(51, 48);
> 
> Ditto.
> 
>> +    return reg;
>> +}
>> +
>> +/*
>> + * Allow mapping some parts of guest memory into Xen's VA space to
>> have easy
>> + * access to it. This is to allow ITS configuration data to be held in
>> + * guest memory and avoid using Xen memory for that.
>> + */
>> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int
>> nr_pages)
> 
> Please pass a gfn_t rather than paddr_t.
> 
>> +{
>> +    mfn_t onepage;
>> +    mfn_t *pages;
> 
> s/pages/mfns/
> 
>> +    int i;
>> +    void *ptr;
>> +
>> +    /* TODO: free previous mapping, change prototype? use
>> get-put-put? */
>> +
>> +    guest_addr &= PAGE_MASK;
>> +
>> +    if ( nr_pages == 1 )
>> +    {
>> +        pages = &onepage;
>> +    } else
>> +    {
>> +        pages = xmalloc_array(mfn_t, nr_pages);
>> +        if ( !pages )
>> +            return NULL;
>> +    }
>> +
>> +    for (i = 0; i < nr_pages; i++)
>> +    {
>> +        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL,
>> P2M_ALLOC);
> 
> get_page_from_gfn can fail if you try to get a page of memory that is
> not backed by a RAM region. Also, get_page_from_gfn will work on foreign
> mappings, and we don't want the guest using foreign memory (e.g. memory
> belonging to another domain) for the ITS internal memory.
> 
> Also, please try to pay attention to error paths whilst you write code.
> It is a pain to handle them after the code has been written. I will try
> to point them out when I spot them.
> 
>> +        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);
> 
> You cannot assume a 1:1 mapping between the IPA and the PA. Please use
> the struct page_info returned by get_page_from_gfn.
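A hedged sketch of that per-page lookup (the helper name and the exact policy are made up, not taken from this series):

static struct page_info *get_guest_ram_page(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    struct page_info *page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);

    if ( !page )
        return NULL;

    /* Reject foreign mappings and anything that is not normal guest RAM. */
    if ( t != p2m_ram_rw )
    {
        put_page(page);
        return NULL;
    }

    /* The MFN then comes from page_to_mfn(page), not from the IPA. */
    return page;
}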
> 
>> +    }
>> +
>> +    ptr = vmap(pages, nr_pages);
> 
> I am not a big fan of the vmap solution for various reasons:
>     - the VMAP area is small (only 1GB), so it will not scale (you seem to
> use it to map pretty much all memory provisioned for the ITS)
>     - writing to a register cannot fail, so how do you cope with that?
> 
> I think the best approach here is to use something similar to the
> copy_*_guests helpers, but dealing with IPA rather than guest VA.
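Purely for illustration, such an IPA-based accessor might look roughly like this (the name vgic_access_guest_memory is invented, and the sketch only handles accesses within a single page):

static int vgic_access_guest_memory(struct domain *d, paddr_t gpa, void *buf,
                                    unsigned int len, bool is_write)
{
    struct page_info *page;
    unsigned int offset = gpa & ~PAGE_MASK;
    uint8_t *p;

    if ( offset + len > PAGE_SIZE )
        return -EINVAL;

    page = get_page_from_gfn(d, paddr_to_pfn(gpa), NULL, P2M_ALLOC);
    if ( !page )
        return -EINVAL;

    /* Map one frame transiently instead of keeping a long-lived vmap. */
    p = __map_domain_page(page);
    if ( is_write )
        memcpy(p + offset, buf, len);
    else
        memcpy(buf, p + offset, len);
    unmap_domain_page(p);

    put_page(page);

    return 0;
}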
> 
>> +
>> +    if ( nr_pages > 1 )
>> +        xfree(pages);
>> +
>> +    return ptr;
>> +}
>> +
>> +void unmap_guest_pages(void *va, int nr_pages)
>> +{
>> +    paddr_t pa;
>> +    unsigned long i;
>> +
>> +    if ( !va )
>> +        return;
>> +
>> +    va = (void *)((uintptr_t)va & PAGE_MASK);
>> +    pa = virt_to_maddr(va);
>> +
>> +    vunmap(va);
>> +    for (i = 0; i < nr_pages; i++)
>> +        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
>> +
>> +    return;
>> +}
>> +
>>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t
>> *info,
>>                                            uint32_t gicr_reg,
>>                                            register_t r)
>>  {
>>      struct hsr_dabt dabt = info->dabt;
>> +    uint64_t reg;
>>
>>      switch ( gicr_reg )
>>      {
>> @@ -375,13 +512,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct
>> vcpu *v, mmio_info_t *info,
>>      case 0x0050:
>>          goto write_reserved;
>>
>> -    case VREG64(GICR_PROPBASER):
>> -        /* LPI is not implemented */
>> -        goto write_ignore_64;
>> +    case VREG64(GICR_PROPBASER): {
> 
> Coding style: the { should be on its own line.
> 
>> +        int nr_pages;
> 
> unsigned int
> 
>> +
>> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;
> 
> Newline here for clarity. Also please use vgic_reg64_check_access rather
> than open-coding it.
> 
>> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
> 
> From my understanding, VGIC_V3_LPIS_ENABLED is set when the guest enables
> LPIs on this re-distributor. However, this check is not safe as
> GICR_CTLR.Enable_LPIs may be set concurrently (the re-distributors are
> accessible from any vCPU).
> 
> Also, when the ITS is not available we should avoid handling the register
> (i.e. treat it as write-ignore). My rationale here is that we should limit
> the amount of emulation exposed to the guest whenever possible.
> 
>> +            return 1;
> 
> I think we should at least print a warning, as writing to GICR_PROPBASER
> while GICR_CTLR.Enable_LPIs is set is unpredictable. IMHO, I would even
> crash the guest.
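One possible shape for that check, reusing the flag introduced in this patch (illustrative only):

        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
        {
            gdprintk(XENLOG_WARNING,
                     "write to GICR_PROPBASER with LPIs enabled is UNPREDICTABLE\n");
            domain_crash_synchronous();  /* or, more leniently, ignore the write */
            return 0;
        }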
> 
> The code below likely needs locking, as the property table is common to
> all re-distributors and hence could be modified concurrently. Also, I would
> like to see a comment on top of the GICR_TYPER emulation mentioning that
> all re-distributors share the same common property table
> (GICR_TYPER.CommonLPIAff = 0).
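A hedged sketch of the serialisation meant here, assuming the existing per-domain vgic lock is an acceptable lock to reuse for this:

        spin_lock(&v->domain->arch.vgic.lock);

        reg = v->domain->arch.vgic.rdist_propbase;
        vgic_reg64_update(&reg, r, info);
        reg = sanitize_propbaser(reg);
        v->domain->arch.vgic.rdist_propbase = reg;

        spin_unlock(&v->domain->arch.vgic.lock);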
> 
>> +
>> +        reg = v->domain->arch.vgic.rdist_propbase;
>> +        vgic_reg64_update(&reg, r, info);
>> +        reg = sanitize_propbaser(reg);
>> +        v->domain->arch.vgic.rdist_propbase = reg;
>>
>> +        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) +
>> 1) - 8192;
> 
> The spec says (see 8.11.19): "If the value of this field is larger than the
> value of GICD_TYPER.IDbits, the GICD_TYPER.IDbits value applies." We
> don't want to map more than necessary.
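Something along these lines would implement that clamping (illustrative; VGIC_V3_IDBITS is a stand-in for the value Xen exposes in GICD_TYPER.IDbits):

static unsigned int vgic_propbase_nr_pages(uint64_t propbase)
{
    unsigned int idbits = min_t(unsigned int, propbase & 0x1f, VGIC_V3_IDBITS);

    /*
     * One property byte per LPI, and LPIs start at INTID 8192. A real
     * implementation would also need to treat IDbits < 14 as "no LPIs".
     */
    return DIV_ROUND_UP((1ULL << (idbits + 1)) - 8192, PAGE_SIZE);
}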
> 
>> +        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
>> +        unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);
> 
> This looks wrong to me. A guest could specify a size different from the
> previous write.
> 
>> +        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
>> +                                                         reg &
>> GENMASK(47, 12),
>> +                                                         nr_pages);
>> +        return 1;
>> +    }
>>      case VREG64(GICR_PENDBASER):
>> -        /* LPI is not implemented */
>> -        goto write_ignore_64;
>> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;
> 
> Newline + vgic_reg64_check_access
> 
> Also, you don't check whether the LPIs have been enabled here.
> 
> All my comments above stand. Furthermore, the code is not correctly
> indented (you are using hard tabs).
> 
>> +    reg = v->arch.vgic.rdist_pendbase;
>> +    vgic_reg64_update(&reg, r, info);
>> +    reg = sanitize_pendbaser(reg);
>> +    v->arch.vgic.rdist_pendbase = reg;
>> +
>> +        unmap_guest_pages(v->arch.vgic.pendtable, 16);
>> +    v->arch.vgic.pendtable = map_guest_pages(v->domain,
>> +                                                 reg & GENMASK(47,
>> 12), 16);
> 
> The pending table is never touched by Xen. So I would avoid mapping it.
> 
>> +    return 1;
>>
>>      case 0x0080:
>>          goto write_reserved;
>> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>> index b961551..4d9304f 100644
>> --- a/xen/arch/arm/vgic.c
>> +++ b/xen/arch/arm/vgic.c
>> @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu
>> *v, unsigned int lpi,
>>          empty->pirq.irq = lpi;
>>      }
>>
>> +    /* Update the enabled status */
>> +    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
>> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
>> +
>>      return &empty->pirq;
>>  }
>>
>> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
>> index ae8a9de..0cd3500 100644
>> --- a/xen/include/asm-arm/domain.h
>> +++ b/xen/include/asm-arm/domain.h
>> @@ -109,6 +109,8 @@ struct arch_domain
>>          } *rdist_regions;
>>          int nr_regions;                     /* Number of rdist
>> regions */
>>          uint32_t rdist_stride;              /* Re-Distributor stride */
>> +        uint64_t rdist_propbase;
>> +        uint8_t *proptable;
>>  #endif
>>      } vgic;
>>
>> @@ -247,7 +249,10 @@ struct arch_vcpu
>>
>>          /* GICv3: redistributor base and flags for this vCPU */
>>          paddr_t rdist_base;
>> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
>> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the
>> rdist */
> 
> Please avoid spurious changes. We don't require in Xen that all the
> constants be aligned. This also makes it harder to go through the changes.
> 
>> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
> 
> Please document the purpose of this bit.
> 
>> +        uint64_t rdist_pendbase;
>> +        unsigned long *pendtable;
>>          uint8_t flags;
>>          struct list_head pending_lpi_list;
>>      } vgic;
>> diff --git a/xen/include/asm-arm/gic-its.h
>> b/xen/include/asm-arm/gic-its.h
>> index 1f881c0..3b2e5c0 100644
>> --- a/xen/include/asm-arm/gic-its.h
>> +++ b/xen/include/asm-arm/gic-its.h
>> @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>>
>>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
> 
> s/lpi/vlpi/ to make it clear this is a function dealing with virtual LPIs.
> 
>>  {
>> -    return GIC_PRI_IRQ;
>> +    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;
> 
> I think this is the best place to ask this question: I don't see any
> code within this series that checks that the guest effectively initialized
> proptable and that the size is correct (you don't check that the guest
> provided enough memory compared to the vLPI range suggested).
> 
> FWIW, I already made those comments back when Vijay sent his patch
> series. It might be worth looking at what he did regarding all
> the sanity checks.
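For illustration, a more defensive accessor might look like this — nr_lpis is a hypothetical field recording how many vLPIs the guest-provided table actually covers:

static inline int gicv3_vlpi_get_priority(struct domain *d, uint32_t vlpi)
{
    /* Fall back to the default priority for anything out of range. */
    if ( vlpi < 8192 || !d->arch.vgic.proptable ||
         vlpi - 8192 >= d->arch.vgic.nr_lpis )
        return GIC_PRI_IRQ;

    return d->arch.vgic.proptable[vlpi - 8192] & 0xfc;
}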
> 
>> +}
> 
> Newline here for clarity.
> 
>> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
> 
> Ditto for the naming.
> 
>> +{
>> +    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
>>  }
>>
>>  #else
>> @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct
>> domain *d, uint32_t lpi)
>>  {
>>      return GIC_PRI_IRQ;
>>  }
> 
> Newline here for clarity.
> 
>> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
>> +{
>> +    return false;
>> +}
>>
>>  #endif /* CONFIG_HAS_ITS */
>>
>> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
>> index 4e29ba6..2b216cc 100644
>> --- a/xen/include/asm-arm/vgic.h
>> +++ b/xen/include/asm-arm/vgic.h
>> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>>
>>  #undef VGIC_REG_HELPERS
>>
>> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int
>> nr_pages);
>> +void unmap_guest_pages(void *va, int nr_pages);
>> +
>>  enum gic_sgi_mode;
>>
>>  /*
>>
> 
> Regards,
>
Julien Grall Jan. 31, 2017, 10:38 a.m. UTC | #10
On 31/01/2017 09:10, Andre Przywara wrote:
> Hi Julien,

Hi Andre,

> On 02/11/16 17:18, Julien Grall wrote:
>> On 28/09/16 19:24, Andre Przywara wrote:
>>> +    return (reg & ~field_mask) | field;
>>> +}
>>> +
>>> +/* We want to avoid outer shareable. */
>>> +static uint64_t vgic_sanitise_shareability(uint64_t field)
>>> +{
>>> +    switch (field) {
>>> +    case GIC_BASER_OuterShareable:
>>> +        return GIC_BASER_InnerShareable;
>>> +    default:
>>> +        return field;
>>> +    }
>>> +}
>>
>> I am not sure I understand why we need to sanitise the value here. From
>> my understanding of the spec (see 8.11.18 in IHI 0069C) we should
>> support any shareability/cacheability, correct?
>
> No, actually an ITS is free to support only _one_ of those attributes,
> up to the point where it is read-only:
>
> "It is IMPLEMENTATION DEFINED whether this field has a fixed value or
> can be programmed by software. Implementing this field with a fixed
> value is deprecated."
>
> So we support more than one value, but refuse any really not useful
> ones. This goes in line with the KVM implementation.

Looking at your quote from the spec, this behavior is deprecated. Why do 
we want to implement a deprecated behavior?

>
> For the rest of the comments regarding the memory tables setup:
> I effectively rewrote this in the new series, so I think the majority of
> the comments don't apply anymore, hopefully the rewrite actually fixed
> the issues you mentioned. So I refrain from any comments now and look
> forward to a review of the new approach ;-)

I will give a look to the new implementation.

Cheers,
Andre Przywara Jan. 31, 2017, 12:04 p.m. UTC | #11
Hi,

On 31/01/17 10:38, Julien Grall wrote:
> 
> 
> On 31/01/2017 09:10, Andre Przywara wrote:
>> Hi Julien,
> 
> Hi Andre,
> 
>> On 02/11/16 17:18, Julien Grall wrote:
>>> On 28/09/16 19:24, Andre Przywara wrote:
>>>> +    return (reg & ~field_mask) | field;
>>>> +}
>>>> +
>>>> +/* We want to avoid outer shareable. */
>>>> +static uint64_t vgic_sanitise_shareability(uint64_t field)
>>>> +{
>>>> +    switch (field) {
>>>> +    case GIC_BASER_OuterShareable:
>>>> +        return GIC_BASER_InnerShareable;
>>>> +    default:
>>>> +        return field;
>>>> +    }
>>>> +}
>>>
>>> I am not sure I understand why we need to sanitise the value here. From
>>> my understanding of the spec (see 8.11.18 in IHI 0069C) we should
>>> support any shareability/cacheability, correct?
>>
>> No, actually an ITS is free to support only _one_ of those attributes,
>> up to the point where it is read-only:
>>
>> "It is IMPLEMENTATION DEFINED whether this field has a fixed value or
>> can be programmed by software. Implementing this field with a fixed
>> value is deprecated."
>>
>> So we support more than one value, but refuse any really not useful
>> ones. This goes in line with the KVM implementation.
> 
> Looking at your quote from the spec, this behavior is deprecated. Why do
> we want to implement a deprecated behavior?

We don't. Allowing only _one_ attribute and thus making those register
bits read-only is deprecated. We make sure to provide support for at
least two of them.
Supporting every possible attribute in a virtualization scenario is
pointless and not helpful. I believe the architecture requires software
to cope with only one attribute, even though this is for some reason
"deprecated" (which is a hint for an implementer, not for a driver author).

>> For the rest of the comments regarding the memory tables setup:
>> I effectively rewrote this in the new series, so I think the majority of
>> the comments don't apply anymore, hopefully the rewrite actually fixed
>> the issues you mentioned. So I refrain from any comments now and look
>> forward to a review of the new approach ;-)
> 
> I will give a look to the new implementation.

Thanks!

Cheers,
Andre.
Andre Przywara March 29, 2017, 3:47 p.m. UTC | #12
Hi,

On 29/10/16 01:39, Stefano Stabellini wrote:
> On Wed, 28 Sep 2016, Andre Przywara wrote:
>> Allow a guest to provide the address and size for the memory regions
>> it has reserved for the GICv3 pending and property tables.
>> We sanitise the various fields of the respective redistributor
>> registers and map those pages into Xen's address space to have easy
>> access.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>> ---
>> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
>> index e9b6490..8fe8386 100644
>> --- a/xen/arch/arm/vgic-v3.c
>> +++ b/xen/arch/arm/vgic-v3.c
....

>> +        reg = v->domain->arch.vgic.rdist_propbase;
>> +        vgic_reg64_update(&reg, r, info);
>> +        reg = sanitize_propbaser(reg);
>> +        v->domain->arch.vgic.rdist_propbase = reg;
>>  
>> +        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - 8192;
>> +        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
> 
> Do we need to set an upper limit on nr_pages? We don't really want to
> allow (2^0x1f)/4096 pages, right?

Why not? This is the virtual property table, and the *guest* provides
the memory. We just comply here and map it. I don't see any issue.

[ .... ]

>> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>> index b961551..4d9304f 100644
>> --- a/xen/arch/arm/vgic.c
>> +++ b/xen/arch/arm/vgic.c
>> @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>>          empty->pirq.irq = lpi;
>>      }
>>  
>> +    /* Update the enabled status */
>> +    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
>> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
> 
> Where is the GIC_IRQ_GUEST_ENABLED unset?

In the patch where the INV command is emulated. This is how
enabling/disabling LPI works: Software (the guest here) sets the bit in
the property table and issues an ITS command to notify the ITS
(emulation) about it.
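In other words, the INV emulation can do something along these lines once it has looked up the pending_irq for the vLPI (a sketch, not the code from the series):

static void vgic_vlpi_sync_enable(struct domain *d, uint32_t vlpi,
                                  struct pending_irq *p)
{
    if ( gicv3_lpi_is_enabled(d, vlpi) )
        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
    else
        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
}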

>>      return &empty->pirq;
>>  }
>>  
>> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
>> index ae8a9de..0cd3500 100644
>> --- a/xen/include/asm-arm/domain.h
>> +++ b/xen/include/asm-arm/domain.h
>> @@ -109,6 +109,8 @@ struct arch_domain
>>          } *rdist_regions;
>>          int nr_regions;                     /* Number of rdist regions */
>>          uint32_t rdist_stride;              /* Re-Distributor stride */
>> +        uint64_t rdist_propbase;
>> +        uint8_t *proptable;
> 
> Do we need to keep both rdist_propbase and proptable? It is easy to go
> from proptable to rdist_propbase and I guess it is not an operation that
> is done often? If so, we could save some memory and remove it.

The code has changed meanwhile, so this does not apply directly anymore,
but just to make sure:
We need rdist_propbase separately, because a guest can happily set and
change it as often as it wants before enabling LPIs. We shouldn't (and
we don't) allocate memory now (and so set proptable) until the LPIs get
enabled.
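Sketched out (illustrative only; vgic_propbase_nr_pages() is the kind of helper mentioned earlier for deriving the table size), the deferred path would roughly be:

static int vgic_v3_enable_lpis(struct vcpu *v)
{
    uint64_t propbase = v->domain->arch.vgic.rdist_propbase;
    unsigned int nr_pages = vgic_propbase_nr_pages(propbase);

    /* Only now, on GICR_CTLR.EnableLPIs, is the latched PROPBASER used. */
    v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
                                                     propbase & GENMASK(47, 12),
                                                     nr_pages);
    if ( !v->domain->arch.vgic.proptable )
        return -ENOMEM;

    v->arch.vgic.flags |= VGIC_V3_LPIS_ENABLED;

    return 0;
}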

>>  #endif
>>      } vgic;
>>  
>> @@ -247,7 +249,10 @@ struct arch_vcpu
>>  
>>          /* GICv3: redistributor base and flags for this vCPU */
>>          paddr_t rdist_base;
>> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
>> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
>> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
>> +        uint64_t rdist_pendbase;
>> +        unsigned long *pendtable;
> 
> Same here.

And the same rationale applies here.

Fixed / addresses the rest.

Cheers,
Andre.

>>          uint8_t flags;
>>          struct list_head pending_lpi_list;
>>      } vgic;
>> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
>> index 1f881c0..3b2e5c0 100644
>> --- a/xen/include/asm-arm/gic-its.h
>> +++ b/xen/include/asm-arm/gic-its.h
>> @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its,
>>  
>>  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>>  {
>> -    return GIC_PRI_IRQ;
>> +    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;
> 
> Please #define 0xfc. Do we need to check for lpi overflows? As in lpi
> numbers larger than proptable size?
> 
> 
>> +}
>> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
>> +{
>> +    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
>>  }
>>  
>>  #else
>> @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>>  {
>>      return GIC_PRI_IRQ;
>>  }
>> +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
>> +{
>> +    return false;
>> +}
>>  
>>  #endif /* CONFIG_HAS_ITS */
>>  
>> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
>> index 4e29ba6..2b216cc 100644
>> --- a/xen/include/asm-arm/vgic.h
>> +++ b/xen/include/asm-arm/vgic.h
>> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>>  
>>  #undef VGIC_REG_HELPERS
>>  
>> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
>> +void unmap_guest_pages(void *va, int nr_pages);
>> +
>>  enum gic_sgi_mode;
>>  
>>  /*

Patch

diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index e9b6490..8fe8386 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -20,12 +20,14 @@ 
 
 #include <xen/bitops.h>
 #include <xen/config.h>
+#include <xen/domain_page.h>
 #include <xen/lib.h>
 #include <xen/init.h>
 #include <xen/softirq.h>
 #include <xen/irq.h>
 #include <xen/sched.h>
 #include <xen/sizes.h>
+#include <xen/vmap.h>
 #include <asm/current.h>
 #include <asm/mmio.h>
 #include <asm/gic_v3_defs.h>
@@ -228,12 +230,14 @@  static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
         goto read_reserved;
 
     case VREG64(GICR_PROPBASER):
-        /* LPI's not implemented */
-        goto read_as_zero_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
+        return 1;
 
     case VREG64(GICR_PENDBASER):
-        /* LPI's not implemented */
-        goto read_as_zero_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
+        return 1;
 
     case 0x0080:
         goto read_reserved;
@@ -301,11 +305,6 @@  bad_width:
     domain_crash_synchronous();
     return 0;
 
-read_as_zero_64:
-    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
-    *r = 0;
-    return 1;
-
 read_as_zero_32:
     if ( dabt.size != DABT_WORD ) goto bad_width;
     *r = 0;
@@ -330,11 +329,149 @@  read_unknown:
     return 1;
 }
 
+static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
+                                    int field_shift,
+                                    uint64_t (*sanitise_fn)(uint64_t))
+{
+    uint64_t field = (reg & field_mask) >> field_shift;
+
+    field = sanitise_fn(field) << field_shift;
+    return (reg & ~field_mask) | field;
+}
+
+/* We want to avoid outer shareable. */
+static uint64_t vgic_sanitise_shareability(uint64_t field)
+{
+    switch (field) {
+    case GIC_BASER_OuterShareable:
+        return GIC_BASER_InnerShareable;
+    default:
+        return field;
+    }
+}
+
+/* Avoid any inner non-cacheable mapping. */
+static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
+{
+    switch (field) {
+    case GIC_BASER_CACHE_nCnB:
+    case GIC_BASER_CACHE_nC:
+        return GIC_BASER_CACHE_RaWb;
+    default:
+        return field;
+    }
+}
+
+/* Non-cacheable or same-as-inner are OK. */
+static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
+{
+    switch (field) {
+    case GIC_BASER_CACHE_SameAsInner:
+    case GIC_BASER_CACHE_nC:
+        return field;
+    default:
+        return GIC_BASER_CACHE_nC;
+    }
+}
+
+static uint64_t sanitize_propbaser(uint64_t reg)
+{
+    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
+                              GICR_PROPBASER_SHAREABILITY_SHIFT,
+                              vgic_sanitise_shareability);
+    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
+                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_inner_cacheability);
+    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
+                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_outer_cacheability);
+
+    reg &= ~PROPBASER_RES0_MASK;
+    reg &= ~GENMASK(51, 48);
+    return reg;
+}
+
+static uint64_t sanitize_pendbaser(uint64_t reg)
+{
+    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
+                              GICR_PENDBASER_SHAREABILITY_SHIFT,
+                              vgic_sanitise_shareability);
+    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
+                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_inner_cacheability);
+    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
+                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
+                              vgic_sanitise_outer_cacheability);
+
+    reg &= ~PENDBASER_RES0_MASK;
+    reg &= ~GENMASK(51, 48);
+    return reg;
+}
+
+/*
+ * Allow mapping some parts of guest memory into Xen's VA space to have easy
+ * access to it. This is to allow ITS configuration data to be held in
+ * guest memory and avoid using Xen memory for that.
+ */
+void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)
+{
+    mfn_t onepage;
+    mfn_t *pages;
+    int i;
+    void *ptr;
+
+    /* TODO: free previous mapping, change prototype? use get-put-put? */
+
+    guest_addr &= PAGE_MASK;
+
+    if ( nr_pages == 1 )
+    {
+        pages = &onepage;
+    } else
+    {
+        pages = xmalloc_array(mfn_t, nr_pages);
+        if ( !pages )
+            return NULL;
+    }
+
+    for (i = 0; i < nr_pages; i++)
+    {
+        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);
+        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);
+    }
+
+    ptr = vmap(pages, nr_pages);
+
+    if ( nr_pages > 1 )
+        xfree(pages);
+
+    return ptr;
+}
+
+void unmap_guest_pages(void *va, int nr_pages)
+{
+    paddr_t pa;
+    unsigned long i;
+
+    if ( !va )
+        return;
+
+    va = (void *)((uintptr_t)va & PAGE_MASK);
+    pa = virt_to_maddr(va);
+
+    vunmap(va);
+    for (i = 0; i < nr_pages; i++)
+        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
+
+    return;
+}
+
 static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
                                           uint32_t gicr_reg,
                                           register_t r)
 {
     struct hsr_dabt dabt = info->dabt;
+    uint64_t reg;
 
     switch ( gicr_reg )
     {
@@ -375,13 +512,37 @@  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
     case 0x0050:
         goto write_reserved;
 
-    case VREG64(GICR_PROPBASER):
-        /* LPI is not implemented */
-        goto write_ignore_64;
+    case VREG64(GICR_PROPBASER): {
+        int nr_pages;
+
+        if ( info->dabt.size < DABT_WORD ) goto bad_width;
+        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
+            return 1;
+
+        reg = v->domain->arch.vgic.rdist_propbase;
+        vgic_reg64_update(&reg, r, info);
+        reg = sanitize_propbaser(reg);
+        v->domain->arch.vgic.rdist_propbase = reg;
 
+        nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - 8192;
+        nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
+        unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);
+        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
+                                                         reg & GENMASK(47, 12),
+                                                         nr_pages);
+        return 1;
+    }
     case VREG64(GICR_PENDBASER):
-        /* LPI is not implemented */
-        goto write_ignore_64;
+        if ( info->dabt.size < DABT_WORD ) goto bad_width;
+	reg = v->arch.vgic.rdist_pendbase;
+	vgic_reg64_update(&reg, r, info);
+	reg = sanitize_pendbaser(reg);
+	v->arch.vgic.rdist_pendbase = reg;
+
+        unmap_guest_pages(v->arch.vgic.pendtable, 16);
+	v->arch.vgic.pendtable = map_guest_pages(v->domain,
+                                                 reg & GENMASK(47, 12), 16);
+	return 1;
 
     case 0x0080:
         goto write_reserved;
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index b961551..4d9304f 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -488,6 +488,10 @@  struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
         empty->pirq.irq = lpi;
     }
 
+    /* Update the enabled status */
+    if ( gicv3_lpi_is_enabled(v->domain, lpi) )
+        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
+
     return &empty->pirq;
 }
 
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index ae8a9de..0cd3500 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -109,6 +109,8 @@  struct arch_domain
         } *rdist_regions;
         int nr_regions;                     /* Number of rdist regions */
         uint32_t rdist_stride;              /* Re-Distributor stride */
+        uint64_t rdist_propbase;
+        uint8_t *proptable;
 #endif
     } vgic;
 
@@ -247,7 +249,10 @@  struct arch_vcpu
 
         /* GICv3: redistributor base and flags for this vCPU */
         paddr_t rdist_base;
-#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
+#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
+#define VGIC_V3_LPIS_ENABLED    (1 << 1)
+        uint64_t rdist_pendbase;
+        unsigned long *pendtable;
         uint8_t flags;
         struct list_head pending_lpi_list;
     } vgic;
diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
index 1f881c0..3b2e5c0 100644
--- a/xen/include/asm-arm/gic-its.h
+++ b/xen/include/asm-arm/gic-its.h
@@ -139,7 +139,11 @@  int gicv3_lpi_drop_host_lpi(struct host_its *its,
 
 static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
 {
-    return GIC_PRI_IRQ;
+    return d->arch.vgic.proptable[lpi - 8192] & 0xfc;
+}
+static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
+{
+    return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED;
 }
 
 #else
@@ -185,6 +189,10 @@  static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
 {
     return GIC_PRI_IRQ;
 }
+static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi)
+{
+    return false;
+}
 
 #endif /* CONFIG_HAS_ITS */
 
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 4e29ba6..2b216cc 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -285,6 +285,9 @@  VGIC_REG_HELPERS(32, 0x3);
 
 #undef VGIC_REG_HELPERS
 
+void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
+void unmap_guest_pages(void *va, int nr_pages);
+
 enum gic_sgi_mode;
 
 /*