[v8,21/28] xen/arm: ITS: Add GICR register emulation

Message ID 1454318798-31913-22-git-send-email-vijayak@caviumnetworks.com (mailing list archive)
State New, archived

Commit Message

vijayak@caviumnetworks.com Feb. 1, 2016, 9:26 a.m. UTC
From: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com>

Emulate LPI-related changes to the GICR registers

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com>
---
v8: - Updated GICR_PROPBASER and GICR_PENDBASER only on enabling
      LPIs and added 32/64 bit reg access
    - Allocated LPI property table of size supported by Xen.
    - Moved LPI property table related variables to vgic from vits
      structure.
    - Used Tasklet to parse LPI property table and update pending irq
      structures
    - INV command processing now waits for the LPI property table update
      to complete before returning.
v7: - Merged patch #23 into this patch; it is just one line.
    - Add 32-bit access to GICR_TYPER register
    - Changed dprintk to printk
    - LPI property table handling is changed. Call {enable,disable}_lpi
      only for nr_lpis.
    - Changes to GICD_TYPER emulation.
v6: - Moved LPI handling code to vgic-v3.c
    - parse LPI property table on GICR_PROPBASER update
    - use vgic_is_lpi_supported()
v5: - Handled accesses of all sizes to the LPI configuration table
    - Renamed vits_unmap_lpi_prop to vits_map_lpi_prop
v4: - Added LPI configuration table emulation
    - Renamed functions in line with the vits naming
    - Copied the guest LPI configuration table into Xen
---
 xen/arch/arm/vgic-v3-its.c        |   18 ++
 xen/arch/arm/vgic-v3.c            |  419 +++++++++++++++++++++++++++++++++++--
 xen/arch/arm/vgic.c               |   24 ++-
 xen/include/asm-arm/domain.h      |   31 +++
 xen/include/asm-arm/gic-its.h     |    1 +
 xen/include/asm-arm/gic_v3_defs.h |    3 +
 xen/include/asm-arm/vgic.h        |    3 +
 7 files changed, 479 insertions(+), 20 deletions(-)

Patch

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 913b49d..1bb7674 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -23,12 +23,14 @@ 
 #include <xen/sched.h>
 #include <xen/sizes.h>
 #include <xen/domain_page.h>
+#include <xen/delay.h>
 #include <asm/device.h>
 #include <asm/mmio.h>
 #include <asm/io.h>
 #include <asm/atomic.h>
 #include <asm/gic_v3_defs.h>
 #include <asm/gic.h>
+#include <asm/gic-its.h>
 #include <asm/vgic.h>
 #include <asm/gic-its.h>
 #include <asm/vits.h>
@@ -299,6 +301,22 @@  static int vits_process_discard(struct vcpu *v, struct vgic_its *vits,
 static int vits_process_inv(struct vcpu *v, struct vgic_its *vits,
                             its_cmd_block *virt_cmd)
 {
+    unsigned long flags;
+    int state;
+
+    /* Do not complete the INV command while the LPI property table
+     * is being updated by the tasklet.
+     */
+    do {
+        spin_lock_irqsave(&v->domain->arch.vgic.prop_lock, flags);
+        state = v->domain->arch.vgic.lpi_prop_table_state;
+        spin_unlock_irqrestore(&v->domain->arch.vgic.prop_lock, flags);
+        if ( state != LPI_TAB_IN_PROGRESS )
+            break;
+        cpu_relax();
+        udelay(1);
+    } while ( 1 );
+
     /* Ignored */
     DPRINTK("%pv vITS: INV: dev_id 0x%"PRIx32" id %"PRIu32"\n",
             v, virt_cmd->inv.devid, virt_cmd->inv.event);
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 2deed24..aa3c5ed 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -26,10 +26,14 @@ 
 #include <xen/irq.h>
 #include <xen/sched.h>
 #include <xen/sizes.h>
+#include <xen/tasklet.h>
 #include <asm/current.h>
 #include <asm/mmio.h>
 #include <asm/gic_v3_defs.h>
+#include <asm/gic.h>
+#include <asm/gic-its.h>
 #include <asm/vgic.h>
+#include <asm/vits.h>
 #include <asm/vgic-emul.h>
 
 /*
@@ -159,6 +163,252 @@  static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
     rank->vcpu[offset] = new_vcpu->vcpu_id;
 }
 
+static void vgic_v3_disable_lpi(struct vcpu *v, uint32_t vlpi)
+{
+    struct pending_irq *p;
+    struct vcpu *v_target = v->domain->vcpu[0];
+
+    p = irq_to_pending(v_target, vlpi);
+    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
+    {
+        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+        gic_remove_from_queues(v_target, vlpi);
+    }
+}
+
+static void vgic_v3_enable_lpi(struct vcpu *v, uint32_t vlpi)
+{
+    struct pending_irq *p;
+    unsigned long flags;
+    struct vcpu *v_target = vgic_get_target_vcpu(v, vlpi);
+
+    p = irq_to_pending(v_target, vlpi);
+
+    set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+
+    spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
+
+    if ( !list_empty(&p->inflight) &&
+         !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+        gic_raise_guest_irq(v_target, vlpi, p->priority);
+
+    spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
+}
+
+static int vgic_v3_gits_lpi_mmio_read(struct vcpu *v, mmio_info_t *info,
+                                      register_t *r, void *priv)
+{
+    uint32_t offset;
+    void *addr;
+    unsigned long flags;
+    struct hsr_dabt dabt = info->dabt;
+
+    spin_lock_irqsave(&v->domain->arch.vgic.prop_lock, flags);
+
+    offset = info->gpa -
+                 (v->domain->arch.vgic.propbase & GICR_PROPBASER_PA_MASK);
+    addr = (void *)((u8 *)v->domain->arch.vgic.prop_page + offset);
+
+    switch (dabt.size)
+    {
+    case DABT_DOUBLE_WORD:
+        *r = *((u64 *)addr);
+        break;
+    case DABT_WORD:
+        *r = *((u32 *)addr);
+        break;
+    case DABT_HALF_WORD:
+        *r = *((u16 *)addr);
+        break;
+    default:
+        *r = *((u8 *)addr);
+    }
+    spin_unlock_irqrestore(&v->domain->arch.vgic.prop_lock, flags);
+
+    return 1;
+}
+
+static void vgic_v3_gits_update_lpis_state(struct vcpu *v, uint32_t vid,
+                                           uint32_t size)
+{
+    uint32_t i;
+    uint8_t cfg, *p;
+    bool_t enable;
+
+    ASSERT(spin_is_locked(&v->domain->arch.vgic.prop_lock));
+
+    p = ((u8 *)v->domain->arch.vgic.prop_page + vid);
+
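+    /*
+     * The property table holds one byte per LPI: bit 0 is the enable
+     * bit and bits[7:2] the priority (see LPI_PROP_ENABLED and
+     * LPI_PRIORITY_MASK).
+     */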
+    for ( i = 0 ; i < size; i++ )
+    {
+        cfg = *p;
+        enable = cfg & LPI_PROP_ENABLED;
+
+        /* LPIs start from 8192, so add 8192 to get the actual LPI number */
+        if ( !enable )
+            vgic_v3_enable_lpi(v, vid + FIRST_GIC_LPI);
+        else
+            vgic_v3_disable_lpi(v, vid + FIRST_GIC_LPI);
+
+        p++;
+        vid++;
+    }
+}
+
+static int vgic_v3_gits_lpi_mmio_write(struct vcpu *v, mmio_info_t *info,
+                                       register_t r, void *priv)
+{
+    uint32_t offset;
+    uint8_t *p, *val, i, iter;
+    bool_t enable;
+    unsigned long flags;
+    struct hsr_dabt dabt = info->dabt;
+
+    spin_lock_irqsave(&v->domain->arch.vgic.prop_lock, flags);
+
+    offset = info->gpa -
+                 (v->domain->arch.vgic.propbase & GICR_PROPBASER_PA_MASK);
+
+    p = ((u8 *)v->domain->arch.vgic.prop_page + offset);
+    val = (u8 *)&r;
+    iter = 1 << dabt.size;
+
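+    /* Each byte of the access configures one LPI */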
+    for ( i = 0 ; i < iter; i++ )
+    {
+        enable = (*p & *val) & LPI_PROP_ENABLED;
+
+        if ( !enable )
+            vgic_v3_enable_lpi(v, offset + FIRST_GIC_LPI);
+        else
+            vgic_v3_disable_lpi(v, offset + FIRST_GIC_LPI);
+
+        /* Copy the written byte into the virtual prop page */
+        *p = *val;
+        val++;
+        p++;
+        offset++;
+    }
+
+    spin_unlock_irqrestore(&v->domain->arch.vgic.prop_lock, flags);
+
+    return 1;
+}
+
+static const struct mmio_handler_ops vgic_gits_lpi_mmio_handler = {
+    .read  = vgic_v3_gits_lpi_mmio_read,
+    .write = vgic_v3_gits_lpi_mmio_write,
+};
+
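+/*
+ * Tasklet to walk the virtual LPI property table and update the enable
+ * state of each LPI outside of the MMIO trap path.
+ */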
+static void lpi_prop_table_tasklet_func(unsigned long data)
+{
+    unsigned long flags;
+    uint32_t i, id_bits, lpi_size;
+    struct domain *d = (struct domain *)data;
+    struct vcpu *v = d->vcpu[0];
+
+    spin_lock_irqsave(&v->domain->arch.vgic.prop_lock, flags);
+
+    id_bits = ((v->domain->arch.vgic.propbase & GICR_PROPBASER_IDBITS_MASK) +
+               1);
+    lpi_size = min_t(unsigned int, v->domain->arch.vgic.prop_size, 1 << id_bits);
+
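+    /* Parse the smaller of the guest-programmed and Xen-allocated sizes */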
+    for ( i = 0; i < lpi_size / PAGE_SIZE; i++ )
+    {
+        if ( ((i * PAGE_SIZE) + PAGE_SIZE) < v->domain->arch.vgic.nr_lpis )
+            vgic_v3_gits_update_lpis_state(v, (i * PAGE_SIZE), PAGE_SIZE);
+    }
+
+    v->domain->arch.vgic.lpi_prop_table_state = LPI_TAB_UPDATED;
+    spin_unlock_irqrestore(&v->domain->arch.vgic.prop_lock, flags);
+}
+
+static int vgic_v3_map_lpi_prop(struct vcpu *v)
+{
+    paddr_t gaddr, addr;
+    unsigned long mfn, flags;
+    uint32_t id_bits, vgic_id_bits, lpi_size;
+    int i;
+
+    gaddr = v->domain->arch.vgic.propbase & GICR_PROPBASER_PA_MASK;
+    id_bits = ((v->domain->arch.vgic.propbase & GICR_PROPBASER_IDBITS_MASK) +
+               1);
+
+    vgic_id_bits = get_count_order(v->domain->arch.vgic.nr_lpis +
+                                   FIRST_GIC_LPI);
+
+    spin_lock_irqsave(&v->domain->arch.vgic.prop_lock, flags);
+
+    /*
+     * Here we limit the size of the LPI property table to the number of
+     * LPIs that the domain supports.
+     */
+    lpi_size = 1 << vgic_id_bits;
+
+    /*
+     * Allocate the virtual LPI property table.
+     * TODO: Re-use the guest property table.
+     * TODO: Free any prop_page that is already allocated.
+     */
+    v->domain->arch.vgic.prop_page =
+        alloc_xenheap_pages(get_order_from_bytes(lpi_size), 0);
+    if ( !v->domain->arch.vgic.prop_page )
+    {
+        spin_unlock_irqrestore(&v->domain->arch.vgic.prop_lock, flags);
+        printk(XENLOG_G_ERR
+               "d%d: vITS: Fail to allocate LPI Prop page\n",
+               v->domain->domain_id);
+        return 0;
+    }
+
+    v->domain->arch.vgic.prop_size  = lpi_size;
+
+    /*
+     * lpi_size is always aligned to PAGE_SIZE because it is generated from
+     * vgic_id_bits, which is computed using get_count_order.
+     *
+     * Use the minimum of the size supported by Xen (vgic.nr_lpis) and the
+     * size of the LPI property table allocated by the guest.
+     */
+    lpi_size = min_t(unsigned int, v->domain->arch.vgic.prop_size, 1 << id_bits);
+
+    addr = gaddr;
+    for ( i = 0; i < lpi_size / PAGE_SIZE; i++ )
+    {
+        vgic_access_guest_memory(v->domain, addr,
+                                 (void *)(v->domain->arch.vgic.prop_page +
+                                 (i * PAGE_SIZE)), PAGE_SIZE, 0);
+
+        /* Unmap LPI property table. */
+        mfn = gmfn_to_mfn(v->domain, paddr_to_pfn(addr));
+        if ( unlikely(!mfn_valid(mfn)) )
+        {
+            spin_unlock_irqrestore(&v->domain->arch.vgic.prop_lock, flags);
+            printk(XENLOG_G_ERR
+                   "vITS: Invalid propbaser address %"PRIx64" for domain %d\n",
+                   addr, v->domain->domain_id);
+            return 0;
+        }
+        guest_physmap_remove_page(v->domain, paddr_to_pfn(addr), mfn, 0);
+        addr += PAGE_SIZE;
+    }
+
+    /*
+     * All re-distributors share a common LPI configuration table, so one
+     * set of MMIO handlers for the configuration table is enough.
+     *
+     * Register MMIO handlers for this region.
+     */
+    register_mmio_handler(v->domain, &vgic_gits_lpi_mmio_handler,
+                          gaddr, v->domain->arch.vgic.prop_size, NULL);
+
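+    /* INV command processing waits for the tasklet to set LPI_TAB_UPDATED */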
+    v->domain->arch.vgic.lpi_prop_table_state = LPI_TAB_IN_PROGRESS;
+    spin_unlock_irqrestore(&v->domain->arch.vgic.prop_lock, flags);
+
+    /* Schedule tasklet */
+    tasklet_schedule(&v->domain->arch.vgic.lpi_prop_table_tasklet);
+
+    return 1;
+}
+
 static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
                                          uint32_t gicr_reg,
                                          register_t *r)
@@ -168,9 +418,11 @@  static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
     switch ( gicr_reg )
     {
     case VREG32(GICR_CTLR):
-        /* We have not implemented LPI's, read zero */
-        goto read_as_zero_32;
-
+        if ( dabt.size != DABT_WORD ) goto bad_width;
+        vgic_lock(v);
+        *r = vgic_reg32_extract(v->arch.vgic.gicr_ctlr, info);
+        vgic_unlock(v);
+        return 1;
     case VREG32(GICR_IIDR):
         if ( dabt.size != DABT_WORD ) goto bad_width;
         *r = vgic_reg32_extract(GICV3_GICR_IIDR_VAL, info);
@@ -191,6 +443,12 @@  static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
         if ( v->arch.vgic.flags & VGIC_V3_RDIST_LAST )
             typer |= GICR_TYPER_LAST;
 
+        /* Set Physical LPIs support */
+        if ( vgic_is_lpi_supported(v->domain) )
+            typer |= GICR_TYPER_PLPIS;
+        /* Provide vcpu number as target address */
+        typer |= (v->vcpu_id << GICR_TYPER_PROCESSOR_SHIFT);
+
         *r = vgic_reg64_extract(typer, info);
 
         return 1;
@@ -222,12 +480,40 @@  static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
         goto read_reserved;
 
     case VREG64(GICR_PROPBASER):
-        /* LPI's not implemented */
-        goto read_as_zero_64;
+    {
+        uint32_t val;
+
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+        if ( !vgic_is_lpi_supported(v->domain) )
+            goto read_as_zero_64;
+
+        vgic_lock(v);
+        val = v->arch.vgic.gicr_ctlr;
+        vgic_unlock(v);
+
+        vgic_lpi_prop_lock(v);
+        if ( val & GICR_CTLR_ENABLE_LPIS )
+            *r = vgic_reg64_extract(v->domain->arch.vgic.propbase, info);
+        else
+            *r = vgic_reg64_extract(v->domain->arch.vgic.propbase_save, info);
+        vgic_lpi_prop_unlock(v);
+        return 1;
+    }
 
     case VREG64(GICR_PENDBASER):
-        /* LPI's not implemented */
-        goto read_as_zero_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+        if ( !vgic_is_lpi_supported(v->domain) )
+            goto read_as_zero_64;
+        vgic_lock(v);
+        /* PTZ field is RAZ */
+        if ( v->arch.vgic.gicr_ctlr & GICR_CTLR_ENABLE_LPIS )
+            *r = vgic_reg64_extract(v->arch.vgic.pendbase &
+                                    ~GICR_PENDBASER_PTZ_MASK, info);
+        else
+            *r = vgic_reg64_extract(v->arch.vgic.pendbase_save &
+                                    ~GICR_PENDBASER_PTZ_MASK, info);
+        vgic_unlock(v);
+        return 1;
 
     case 0x0080:
         goto read_reserved;
@@ -329,13 +615,54 @@  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
                                           register_t r)
 {
     struct hsr_dabt dabt = info->dabt;
+    uint32_t val;
 
     switch ( gicr_reg )
     {
     case VREG32(GICR_CTLR):
-        /* LPI's not implemented */
+        if ( dabt.size != DABT_WORD ) goto bad_width;
+        if ( !vgic_is_lpi_supported(v->domain) )
         goto write_ignore_32;
+        /*
+         * Enable LPIs for the ITS. Direct injection of LPIs
+         * by writing to GICR_{SET,CLR}LPIR is not supported.
+         */
+        vgic_lock(v);
+        val = v->arch.vgic.gicr_ctlr & GICR_CTLR_ENABLE_LPIS;
+        vgic_reg32_update(&v->arch.vgic.gicr_ctlr,
+                         (r & GICR_CTLR_ENABLE_LPIS), info);
+
+        /* If GICR_CTLR.EnableLPIs has not changed to 1, just return */
+        if ( !((val ^ v->arch.vgic.gicr_ctlr) &&
+             (v->arch.vgic.gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) )
+        {
+            vgic_unlock(v);
+            return 1;
+        }
+        vgic_unlock(v);
 
+        vgic_lpi_prop_lock(v);
+        /*
+         * On GICR_CTLR.EnableLPIs = 1, update pendbase and propbase
+         * from pendbase_save and propbase_save if they have changed.
+         */
+        if ( v->arch.vgic.pendbase_save ^ v->arch.vgic.pendbase )
+             v->arch.vgic.pendbase = v->arch.vgic.pendbase_save;
+        if ( v->domain->arch.vgic.propbase_save ^ v->domain->arch.vgic.propbase )
+        {
+            if ( v->vcpu_id == 0 )
+            {
+                v->domain->arch.vgic.propbase =
+                    v->domain->arch.vgic.propbase_save;
+                vgic_lpi_prop_unlock(v);
+                return vgic_v3_map_lpi_prop(v);
+            }
+            else
+                v->domain->arch.vgic.propbase =
+                    v->domain->arch.vgic.propbase_save;
+        }
+        vgic_lpi_prop_unlock(v);
+        return 1;
     case VREG32(GICR_IIDR):
         /* RO */
         goto write_ignore_32;
@@ -370,12 +697,51 @@  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
         goto write_reserved;
 
     case VREG64(GICR_PROPBASER):
-        /* LPI is not implemented */
-        goto write_ignore_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+        if ( !vgic_is_lpi_supported(v->domain) )
+            goto write_ignore_64;
+        /*
+         * As per spec, updating GICR_PROPBASER when GICR_CTLR.EnableLPIs = 1
+         * is unpredictable. Here we ignore the update.
+         */
+        vgic_lock(v);
+        val = v->arch.vgic.gicr_ctlr;
+        vgic_unlock(v);
+
+        if ( val & GICR_CTLR_ENABLE_LPIS )
+            goto write_ignore_64;
+        vgic_lpi_prop_lock(v);
+        /*
+         * The LPI configuration table is shared across CPUs, so it must
+         * be the same for all of them.
+         * The check below allows propbase to be updated only once, by vcpu0.
+         * TODO: Handle changes to the property table.
+         */
+        if ( v->vcpu_id == 0 && v->domain->arch.vgic.propbase_save != 0 )
+        {
+            printk(XENLOG_G_WARNING
+                   "%pv: vGICR: Updating LPI propbase is not allowed\n", v);
+            vgic_lpi_prop_unlock(v);
+            return 1;
+        }
+        vgic_reg64_update(&v->domain->arch.vgic.propbase_save, r, info);
+        vgic_lpi_prop_unlock(v);
+        return 1;
 
     case VREG64(GICR_PENDBASER):
-        /* LPI is not implemented */
-        goto write_ignore_64;
+        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+        if ( !vgic_is_lpi_supported(v->domain) )
+            goto write_ignore_64;
+        /* Just save the PENDBASER value so the guest can read it back */
+        vgic_lock(v);
+        if ( v->arch.vgic.gicr_ctlr & GICR_CTLR_ENABLE_LPIS )
+        {
+            vgic_unlock(v);
+            goto write_ignore_64;
+        }
+        vgic_reg64_update(&v->arch.vgic.pendbase_save, r, info);
+        vgic_unlock(v);
+        return 1;
 
     case 0x0080:
         goto write_reserved;
@@ -441,7 +807,7 @@  bad_width:
     return 0;
 
 write_ignore_64:
-    if ( vgic_reg64_check_access(dabt) ) goto bad_width;
+    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
     return 1;
 
 write_ignore_32:
@@ -899,11 +1265,7 @@  static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
 
     case VREG32(GICD_TYPER):
     {
-        /*
-         * Number of interrupt identifier bits supported by the GIC
-         * Stream Protocol Interface
-         */
-        unsigned int irq_bits = get_count_order(vgic_num_irq_lines(v->domain));
+        unsigned int irqs;
         /*
          * Number of processors that may be used as interrupt targets when ARE
          * bit is zero. The maximum is 8.
@@ -916,7 +1278,15 @@  static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
         typer = ((ncpus - 1) << GICD_TYPER_CPUS_SHIFT |
                  DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32));
 
-        typer |= (irq_bits - 1) << GICD_TYPER_ID_BITS_SHIFT;
+        if ( vgic_is_lpi_supported(v->domain) )
+        {
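+            /* ID bits must cover LPIs, which start at FIRST_GIC_LPI (8192) */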
+            irqs = v->domain->arch.vgic.nr_lpis + FIRST_GIC_LPI;
+            typer |= GICD_TYPER_LPIS_SUPPORTED;
+        }
+        else
+            irqs = vgic_num_irq_lines(v->domain);
+
+        typer |= (get_count_order(irqs) - 1) << GICD_TYPER_ID_BITS_SHIFT;
 
         *r = vgic_reg32_extract(typer, info);
 
@@ -1459,6 +1829,17 @@  static int vgic_v3_domain_init(struct domain *d)
 
     d->arch.vgic.ctlr = VGICD_CTLR_DEFAULT;
 
+    spin_lock_init(&d->arch.vgic.prop_lock);
+
+    if ( is_hardware_domain(d) && vgic_v3_hw.its_support )
+    {
+        if ( vits_domain_init(d) )
+            return -ENODEV;
+
+        tasklet_init(&d->arch.vgic.lpi_prop_table_tasklet,
+                     lpi_prop_table_tasklet_func, (unsigned long)d);
+    }
+
     return 0;
 }
 
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 2d89b7c..8d75d90 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -67,6 +67,12 @@  bool_t vgic_is_lpi_supported(struct domain *d)
     return (d->arch.vgic.nr_lpis != 0);
 }
 
+static bool_t vgic_is_domain_lpi(struct domain *d, unsigned int lpi)
+{
+    return ((lpi >= FIRST_GIC_LPI) &&
+            (lpi < (d->arch.vgic.nr_lpis + FIRST_GIC_LPI)));
+}
+
 static void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
 {
     INIT_LIST_HEAD(&p->inflight);
@@ -186,6 +192,10 @@  void domain_vgic_free(struct domain *d)
     xfree(d->arch.vgic.shared_irqs);
     xfree(d->arch.vgic.pending_irqs);
     xfree(d->arch.vgic.allocated_irqs);
+
+    if ( vgic_is_lpi_supported(d) && d->arch.vgic.prop_page != NULL )
+        free_xenheap_pages(d->arch.vgic.prop_page,
+                           get_order_from_bytes(d->arch.vgic.prop_size));
 }
 
 int vcpu_vgic_init(struct vcpu *v)
@@ -232,9 +242,21 @@  static struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
 {
     struct vcpu *v_target;
-    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
+    struct vgic_irq_rank *rank;
     unsigned long flags;
 
+    /*
+     * We don't have a vLPI-to-pLPI mapping, so we cannot determine the
+     * target vCPU on which the corresponding vLPI is enabled.
+     * For now vLPIs are always injected on vcpu0 (see
+     * vgic_vcpu_inject_lpi()), so the pending_irq structure is taken
+     * from vcpu0.
+     * TODO: Get the correct target vCPU.
+     */
+    if ( vgic_is_domain_lpi(v->domain, virq) )
+        return v->domain->vcpu[0];
+
+    rank = vgic_rank_irq(v, virq);
     vgic_lock_rank(v, rank, flags);
     v_target = __vgic_get_target_vcpu(v, virq);
     vgic_unlock_rank(v, rank, flags);
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 0904204..48dcd9a 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -12,6 +12,7 @@ 
 #include <public/hvm/params.h>
 #include <xen/serial.h>
 #include <xen/hvm/iommu.h>
+#include <xen/tasklet.h>
 
 struct hvm_domain
 {
@@ -116,6 +117,26 @@  struct arch_domain
         /* Virtual ITS */
         struct vgic_its *vits;
 #endif
+        /* LPI propbase */
+        uint64_t propbase;
+        /*
+         * Holds the temporary GICR_PROPBASER register value. This value
+         * is used to update propbase after GICR_CTLR.EnableLPIs is set to 1.
+         * This helps to support 32-bit updates to GICR_PROPBASER.
+         */
+        uint64_t propbase_save;
+        /* Virtual LPI property table */
+        void *prop_page;
+        /* Virtual LPI property table size */
+        uint32_t prop_size;
+        /* Spinlock protecting the LPI property table */
+        spinlock_t prop_lock;
+#define LPI_TAB_IN_PROGRESS    1
+#define LPI_TAB_UPDATED        2
+        /* LPI property table state */
+        int lpi_prop_table_state;
+        /* Tasklet for parsing the LPI property table */
+        struct tasklet lpi_prop_table_tasklet;
     } vgic;
 
     struct vuart {
@@ -248,6 +269,16 @@  struct arch_vcpu
 
         /* GICv3: redistributor base and flags for this vCPU */
         paddr_t rdist_base;
+        /* GICv3-ITS: LPI pending table for this vCPU */
+        uint64_t pendbase;
+        /*
+         * Holds the temporary GICR_PENDBASER register value. This value
+         * is used to update pendbase after GICR_CTLR.EnableLPIs is set to 1.
+         * This helps to support 32-bit updates to GICR_PENDBASER.
+         */
+        uint64_t pendbase_save;
+        /* GICv3: Redistributor control register */
+        uint32_t gicr_ctlr;
 #define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
         uint8_t flags;
     } vgic;
diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
index 26b2b9e..aad51fa 100644
--- a/xen/include/asm-arm/gic-its.h
+++ b/xen/include/asm-arm/gic-its.h
@@ -114,6 +114,7 @@ 
 
 #define LPI_PROP_ENABLED                (1 << 0)
 #define LPI_PROP_GROUP1                 (1 << 1)
+#define LPI_PRIORITY_MASK               (0xfc)
 
 /*
  * Collection structure - just an ID, and a redistributor address to
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index 843da8a..bf3c53d 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -129,14 +129,17 @@ 
 #define GICR_PROPBASER_WaWb              (5U << 7)
 #define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
 #define GICR_PROPBASER_IDBITS_MASK       (0x1f)
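+/* Physical address bits [47:12] of GICR_PROPBASER */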
+#define GICR_PROPBASER_PA_MASK           (0xfffffffff000UL)
 #define GICR_TYPER_PLPIS             (1U << 0)
 #define GICR_TYPER_VLPIS             (1U << 1)
 #define GICR_TYPER_LAST              (1U << 4)
+#define GICR_TYPER_PROCESSOR_SHIFT   (8)
 
 #define GICR_PENDBASER_InnerShareable    (1U << 10)
 #define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
 #define GICR_PENDBASER_nC                (1U << 7)
 #define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
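+/* Pending Table Zero (PTZ) bit; emulated as RAZ on guest reads */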
+#define GICR_PENDBASER_PTZ_MASK          (1UL << 62)
 
 #define DEFAULT_PMR_VALUE            0xff
 
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 35d06b8..473fd8e 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -143,6 +143,9 @@  struct vgic_ops {
 #define vgic_lock_rank(v, r, flags)   spin_lock_irqsave(&(r)->lock, flags)
 #define vgic_unlock_rank(v, r, flags) spin_unlock_irqrestore(&(r)->lock, flags)
 
+#define vgic_lpi_prop_lock(v)   spin_lock_irq(&(v)->domain->arch.vgic.prop_lock)
+#define vgic_lpi_prop_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.prop_lock)
+
 /*
  * Rank containing GICD_<FOO><n> for GICD_<FOO> with
  * <b>-bits-per-interrupt