[XEN,v2,03/25] arm: new VGIC: Add GICv3 MMIO handling framework

Message ID cfd46345ad96a447c0dccc371f074a47ed612259.1699618395.git.mykyta_poturai@epam.com
State New, archived
Series arm: Add GICv3 support to the New VGIC

Commit Message

Mykyta Poturai Nov. 10, 2023, 12:56 p.m. UTC
Create a new file, vgic-mmio-v3.c, and describe the GICv3 distributor
and redistributor registers there.
Also provide functions to handle the registration of the separate
per-vCPU redistributor frames and the allocation of redistributor
regions.
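
For reference, a minimal standalone sketch (not part of the patch) of
the address arithmetic used here: each vCPU gets one 128KB
redistributor frame (a 64KB RD_base frame followed by a 64KB SGI_base
frame), frames are packed back to back inside a region, and a region
with count == 0 sizes itself from the number of vCPUs. The GICR base
address and vCPU count below are hypothetical.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SZ_64K              UINT64_C(0x10000)
    #define VGIC_V3_REDIST_SIZE (2 * SZ_64K) /* RD_base + SGI_base frame */

    /* Size of a region: 'count' frames, or one per vCPU if count == 0. */
    static uint64_t region_size(uint32_t count, uint32_t max_vcpus)
    {
        return (count ? count : max_vcpus) * VGIC_V3_REDIST_SIZE;
    }

    /* Base of the frame handed to the free_index'th vCPU in a region. */
    static uint64_t frame_base(uint64_t region_base, uint32_t free_index)
    {
        return region_base + free_index * VGIC_V3_REDIST_SIZE;
    }

    int main(void)
    {
        uint64_t base = UINT64_C(0x080A0000); /* hypothetical GICR base */
        uint32_t vcpus = 4;                   /* hypothetical vCPU count */
        uint32_t i;

        printf("region size: %#" PRIx64 "\n", region_size(0, vcpus));
        for ( i = 0; i < vcpus; i++ )
            printf("vCPU%" PRIu32 " frame at %#" PRIx64 "\n",
                   i, frame_base(base, i));

        return 0;
    }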

Based on Linux commits:
ed9b8cefa91695 by Andre Przywara
1aab6f468c10a1 by Christoffer Dall
ccc27bf5be7b78 by Eric Auger

Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
 xen/arch/arm/include/asm/gic_v3_defs.h |   2 +
 xen/arch/arm/include/asm/new_vgic.h    |  18 +-
 xen/arch/arm/vgic/vgic-init.c          |  10 +-
 xen/arch/arm/vgic/vgic-mmio-v3.c       | 435 +++++++++++++++++++++++++
 xen/arch/arm/vgic/vgic-mmio.c          |   3 +
 xen/arch/arm/vgic/vgic.h               |  15 +
 6 files changed, 475 insertions(+), 8 deletions(-)
 create mode 100644 xen/arch/arm/vgic/vgic-mmio-v3.c

Patch

diff --git a/xen/arch/arm/include/asm/gic_v3_defs.h b/xen/arch/arm/include/asm/gic_v3_defs.h
index 227533868f..b7059635d7 100644
--- a/xen/arch/arm/include/asm/gic_v3_defs.h
+++ b/xen/arch/arm/include/asm/gic_v3_defs.h
@@ -35,6 +35,7 @@ 
 #define GICD_IROUTER                 (0x6000)
 #define GICD_IROUTER32               (0x6100)
 #define GICD_IROUTER1019             (0x7FD8)
+#define GICD_IDREGS                  (0xFFD0)
 #define GICD_PIDR2                   (0xFFE8)
 
 /* Common between GICD_PIDR2 and GICR_PIDR2 */
@@ -89,6 +90,7 @@ 
 #define GICR_INVLPIR                 (0x00A0)
 #define GICR_INVALLR                 (0x00B0)
 #define GICR_SYNCR                   (0x00C0)
+#define GICR_IDREGS                  GICD_IDREGS
 #define GICR_PIDR2                   GICD_PIDR2
 
 /* GICR for SGI's & PPI's */
diff --git a/xen/arch/arm/include/asm/new_vgic.h b/xen/arch/arm/include/asm/new_vgic.h
index 1e76213893..11d8f71851 100644
--- a/xen/arch/arm/include/asm/new_vgic.h
+++ b/xen/arch/arm/include/asm/new_vgic.h
@@ -31,6 +31,8 @@ 
 #define VGIC_MAX_SPI            1019
 #define VGIC_MAX_RESERVED       1023
 #define VGIC_MIN_LPI            8192
+#define VGIC_V3_DIST_SIZE       SZ_64K
+#define VGIC_V3_REDIST_SIZE     (2 * SZ_64K)
 
 #define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
 #define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
@@ -94,6 +96,14 @@  enum iodev_type {
     IODEV_REDIST,
 };
 
+struct vgic_redist_region {
+    uint32_t index;
+    paddr_t base;
+    uint32_t count; /* number of redistributors or 0 if single region */
+    uint32_t free_index; /* index of the next free redistributor */
+    struct list_head list;
+};
+
 struct vgic_io_device {
     gfn_t base_fn;
     struct vcpu *redist_vcpu;
@@ -121,11 +131,7 @@  struct vgic_dist {
         /* either a GICv2 CPU interface */
         paddr_t         cbase;
         /* or a number of GICv3 redistributor regions */
-        struct
-        {
-            paddr_t     vgic_redist_base;
-            paddr_t     vgic_redist_free_offset;
-        };
+        struct list_head rd_regions;
     };
     paddr_t             csize; /* CPU interface size */
     paddr_t             vbase; /* virtual CPU interface base address */
@@ -174,6 +180,8 @@  struct vgic_cpu {
      * parts of the redistributor.
      */
     struct vgic_io_device   rd_iodev;
+    struct vgic_redist_region *rdreg;
+    uint32_t rdreg_index;
     struct vgic_io_device   sgi_iodev;
 
     /* Contains the attributes and gpa of the LPI pending tables. */
diff --git a/xen/arch/arm/vgic/vgic-init.c b/xen/arch/arm/vgic/vgic-init.c
index f8d7d3a226..c3b34be192 100644
--- a/xen/arch/arm/vgic/vgic-init.c
+++ b/xen/arch/arm/vgic/vgic-init.c
@@ -107,14 +107,18 @@  int domain_vgic_register(struct domain *d, unsigned int *mmio_count)
     {
     case GIC_V2:
         *mmio_count = 1;
+        d->arch.vgic.cbase = VGIC_ADDR_UNDEF;
         break;
+    case GIC_V3:
+        *mmio_count = 2;
+        INIT_LIST_HEAD(&d->arch.vgic.rd_regions);
+        break;
+
     default:
         BUG();
     }
 
     d->arch.vgic.dbase = VGIC_ADDR_UNDEF;
-    d->arch.vgic.cbase = VGIC_ADDR_UNDEF;
-    d->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
 
     return 0;
 }
@@ -207,7 +211,7 @@  int vcpu_vgic_init(struct vcpu *v)
     if ( gic_hw_version() == GIC_V2 )
         vgic_v2_enable(v);
     else
-        ret = -ENXIO;
+        ret = vgic_register_redist_iodev(v);
 
     return ret;
 }
diff --git a/xen/arch/arm/vgic/vgic-mmio-v3.c b/xen/arch/arm/vgic/vgic-mmio-v3.c
new file mode 100644
index 0000000000..b79a63ce3e
--- /dev/null
+++ b/xen/arch/arm/vgic/vgic-mmio-v3.c
@@ -0,0 +1,435 @@ 
+/*
+ * VGICv3 MMIO handling functions
+ * Imported from Linux ("new" KVM VGIC) and heavily adapted to Xen.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/bitops.h>
+#include <xen/sched.h>
+#include <xen/sizes.h>
+#include <asm/new_vgic.h>
+#include <asm/gic_v3_defs.h>
+#include <asm/vreg.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+static const struct vgic_register_region vgic_v3_dist_registers[] = {
+    REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi,
+        16, VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
+        vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGROUPR,
+        vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
+        vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
+        vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
+        vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICPENDR,
+        vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISACTIVER,
+        vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICACTIVER,
+        vgic_mmio_read_active, vgic_mmio_write_cactive,
+        1, VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IPRIORITYR,
+        vgic_mmio_read_priority, vgic_mmio_write_priority,
+        8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ITARGETSR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+        VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICFGR,
+        vgic_mmio_read_config, vgic_mmio_write_config, 2,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IGRPMODR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_IROUTER,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 64,
+        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 48,
+        VGIC_ACCESS_32bit),
+};
+
+static const struct vgic_register_region vgic_v3_rd_registers[] = {
+    /* RD_base registers */
+    REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_INVLPIR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_INVALLR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+        VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_SYNCR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 48,
+        VGIC_ACCESS_32bit),
+    /* SGI_base registers */
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
+        vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISENABLER0,
+        vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICENABLER0,
+        vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISPENDR0,
+        vgic_mmio_read_pending, vgic_mmio_write_spending, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICPENDR0,
+        vgic_mmio_read_pending, vgic_mmio_write_cpending, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISACTIVER0,
+        vgic_mmio_read_active, vgic_mmio_write_sactive, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICACTIVER0,
+        vgic_mmio_read_active, vgic_mmio_write_cactive, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
+        vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
+        VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
+        vgic_mmio_read_config, vgic_mmio_write_config, 8,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+    REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
+        vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+        VGIC_ACCESS_32bit),
+};
+
+unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
+{
+    dev->regions    = vgic_v3_dist_registers;
+    dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
+
+    return SZ_64K;
+}
+
+static bool vgic_v3_redist_region_full(struct vgic_redist_region *region)
+{
+    if ( !region->count )
+        return false;
+
+    return (region->free_index >= region->count);
+}
+
+/**
+ * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
+ * which has free space to map a new redistributor.
+ *
+ * @rd_regions: redistributor region list head
+ *
+ * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
+ * The stride between redistributors is 0 and regions are filled in index order.
+ *
+ * Return: the redist region handle, if any, that has space to map a new
+ * redistributor.
+ */
+static struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
+{
+    struct vgic_redist_region *rdreg;
+
+    list_for_each_entry(rdreg, rd_regions, list)
+    {
+        if ( !vgic_v3_redist_region_full(rdreg) )
+            return rdreg;
+    }
+    return NULL;
+}
+
+
+/**
+ * vgic_register_redist_iodev - register a single redist iodev
+ * @vcpu:    The VCPU to which the redistributor belongs
+ *
+ * Register an MMIO iodev for this VCPU's redistributor using the address
+ * provided.
+ *
+ * Return 0 on success, -ERRNO otherwise.
+ */
+int vgic_register_redist_iodev(struct vcpu *vcpu)
+{
+    struct domain *d              = vcpu->domain;
+    struct vgic_dist *vgic        = &d->arch.vgic;
+    struct vgic_cpu *vgic_cpu     = &vcpu->arch.vgic;
+    struct vgic_io_device *rd_dev = &vcpu->arch.vgic.rd_iodev;
+    struct vgic_redist_region *rdreg;
+    paddr_t rd_base;
+
+    /*
+     * We may be creating VCPUs before having set the base address for the
+     * redistributor region, in which case we will come back to this
+     * function for all VCPUs when the base address is set.  Just return
+     * without doing any work for now.
+     */
+    rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
+    if ( !rdreg )
+        return 0;
+
+    vgic_cpu->rdreg       = rdreg;
+    vgic_cpu->rdreg_index = rdreg->free_index;
+
+    rd_base             = rdreg->base + rdreg->free_index * VGIC_V3_REDIST_SIZE;
+
+    rd_dev->base_fn     = gaddr_to_gfn(rd_base);
+    rd_dev->iodev_type  = IODEV_REDIST;
+    rd_dev->regions     = vgic_v3_rd_registers;
+    rd_dev->nr_regions  = ARRAY_SIZE(vgic_v3_rd_registers);
+    rd_dev->redist_vcpu = vcpu;
+
+    register_mmio_handler(d, &vgic_io_ops, rd_base, VGIC_V3_REDIST_SIZE,
+                          rd_dev);
+
+    rdreg->free_index++;
+    return 0;
+}
+
+static int vgic_register_all_redist_iodevs(struct domain *d)
+{
+    struct vcpu *vcpu;
+    int ret = 0;
+
+    for_each_vcpu(d, vcpu)
+    {
+        ret = vgic_register_redist_iodev(vcpu);
+        if ( ret )
+            break;
+    }
+
+    if ( ret )
+    {
+        printk(XENLOG_ERR "Failed to register redistributor iodev\n");
+    }
+
+    return ret;
+}
+
+static inline size_t vgic_v3_rd_region_size(struct domain *d,
+                                            struct vgic_redist_region *rdreg)
+{
+    if ( !rdreg->count )
+        return d->max_vcpus * VGIC_V3_REDIST_SIZE;
+    else
+        return rdreg->count * VGIC_V3_REDIST_SIZE;
+}
+
+/**
+ * vgic_v3_rdist_overlap - check if a region overlaps with any
+ * existing redistributor region
+ *
+ * @domain: domain handle
+ * @base: base of the region
+ * @size: size of region
+ *
+ * Return: true if there is an overlap
+ */
+bool vgic_v3_rdist_overlap(struct domain *domain, paddr_t base, size_t size)
+{
+    struct vgic_dist *d = &domain->arch.vgic;
+    struct vgic_redist_region *rdreg;
+
+    list_for_each_entry(rdreg, &d->rd_regions, list)
+    {
+        if ( (base + size > rdreg->base) &&
+             (base < rdreg->base + vgic_v3_rd_region_size(domain, rdreg)) )
+            return true;
+    }
+    return false;
+}
+
+static inline bool vgic_dist_overlap(struct domain *domain, paddr_t base,
+                                     size_t size)
+{
+    struct vgic_dist *d = &domain->arch.vgic;
+
+    return (base + size > d->dbase) && (base < d->dbase + VGIC_V3_DIST_SIZE);
+}
+
+struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct domain *d,
+                                                           u32 index)
+{
+    struct list_head *rd_regions = &d->arch.vgic.rd_regions;
+    struct vgic_redist_region *rdreg;
+
+    list_for_each_entry(rdreg, rd_regions, list)
+    {
+        if ( rdreg->index == index )
+            return rdreg;
+    }
+    return NULL;
+}
+
+int vgic_check_iorange(paddr_t ioaddr, paddr_t addr, paddr_t alignment,
+                       paddr_t size)
+{
+    if ( !IS_VGIC_ADDR_UNDEF(ioaddr) )
+        return -EEXIST;
+
+    if ( !IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment) )
+        return -EINVAL;
+
+    if ( addr + size < addr )
+        return -EINVAL;
+
+    return 0;
+}
+
+/**
+ * vgic_v3_alloc_redist_region - Allocate a new redistributor region
+ *
+ * Performs various checks before inserting the rdist region in the list.
+ * Those tests depend on whether the size of the rdist region is known
+ * (ie. count != 0). The list is sorted by rdist region index.
+ *
+ * @domain: domain handle
+ * @index: redist region index
+ * @base: base of the new rdist region
+ * @count: number of redistributors the region is made of (0 for the legacy
+ * single region, whose size is derived from the number of vCPUs)
+ *
+ * Return 0 on success, < 0 otherwise
+ */
+static int vgic_v3_alloc_redist_region(struct domain *domain, uint32_t index,
+                                       paddr_t base, uint32_t count)
+{
+    struct vgic_dist *d = &domain->arch.vgic;
+    struct vgic_redist_region *rdreg;
+    struct list_head *rd_regions = &d->rd_regions;
+    int nr_vcpus                 = domain->max_vcpus;
+    size_t size                  = count ? count * VGIC_V3_REDIST_SIZE
+                                         : nr_vcpus * VGIC_V3_REDIST_SIZE;
+    int ret;
+
+    /* Does the region wrap past the end of the address space? */
+    if ( base + size < base )
+        return -EINVAL;
+
+    if ( list_empty(rd_regions) )
+    {
+        if ( index != 0 )
+            return -EINVAL;
+    }
+    else
+    {
+        rdreg = list_last_entry(rd_regions, struct vgic_redist_region, list);
+
+        /* Don't mix single region and discrete redist regions */
+        if ( !count && rdreg->count )
+            return -EINVAL;
+
+        if ( !count )
+            return -EEXIST;
+
+        if ( index != rdreg->index + 1 )
+            return -EINVAL;
+    }
+
+    /*
+     * For legacy single-region redistributor regions (!count),
+     * check that the redistributor region does not overlap with the
+     * distributor's address space.
+     */
+    if ( !count && !IS_VGIC_ADDR_UNDEF(d->dbase) &&
+         vgic_dist_overlap(domain, base, size) )
+        return -EINVAL;
+
+    /* collision with any other rdist region? */
+    if ( vgic_v3_rdist_overlap(domain, base, size) )
+        return -EINVAL;
+
+    rdreg = xzalloc(struct vgic_redist_region);
+    if ( !rdreg )
+        return -ENOMEM;
+
+    rdreg->base = VGIC_ADDR_UNDEF;
+
+    ret = vgic_check_iorange(rdreg->base, base, SZ_64K, size);
+    if ( ret )
+        goto free;
+
+    rdreg->base       = base;
+    rdreg->count      = count;
+    rdreg->free_index = 0;
+    rdreg->index      = index;
+
+    list_add_tail(&rdreg->list, rd_regions);
+    return 0;
+free:
+    xfree(rdreg);
+    return ret;
+}
+
+void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
+{
+    list_del(&rdreg->list);
+    xfree(rdreg);
+}
+
+int vgic_v3_set_redist_base(struct domain *d, u32 index, u64 addr, u32 count)
+{
+    int ret;
+
+    ret = vgic_v3_alloc_redist_region(d, index, addr, count);
+    if ( ret )
+        return ret;
+
+    /*
+     * Register iodevs for each existing VCPU.  Adding more VCPUs
+     * afterwards will register the iodevs when needed.
+     */
+    ret = vgic_register_all_redist_iodevs(d);
+    if ( ret )
+    {
+        struct vgic_redist_region *rdreg;
+
+        rdreg = vgic_v3_rdist_region_from_index(d, index);
+        vgic_v3_free_redist_region(rdreg);
+        return ret;
+    }
+
+    return 0;
+}
diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c
index 5d935a7301..1c3f861887 100644
--- a/xen/arch/arm/vgic/vgic-mmio.c
+++ b/xen/arch/arm/vgic/vgic-mmio.c
@@ -619,6 +619,9 @@  int vgic_register_dist_iodev(struct domain *d, gfn_t dist_base_fn,
     case VGIC_V2:
         len = vgic_v2_init_dist_iodev(io_device);
         break;
+    case VGIC_V3:
+        len = vgic_v3_init_dist_iodev(io_device);
+        break;
     default:
         BUG();
     }
diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h
index 2819569f30..a8e3ef5970 100644
--- a/xen/arch/arm/vgic/vgic.h
+++ b/xen/arch/arm/vgic/vgic.h
@@ -73,6 +73,9 @@  int vgic_register_dist_iodev(struct domain *d, gfn_t dist_base_fn,
 #ifdef CONFIG_GICV3
 void vgic_v3_fold_lr_state(struct vcpu *vcpu);
 void vgic_v3_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr);
+unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
+int vgic_v3_set_redist_base(struct domain *d, u32 index, u64 addr, u32 count);
+int vgic_register_redist_iodev(struct vcpu *vcpu);
 #else
 static inline void vgic_v3_fold_lr_state(struct vcpu *vcpu)
 {
@@ -80,6 +83,18 @@  static inline void vgic_v3_fold_lr_state(struct vcpu *vcpu)
 static inline void vgic_v3_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr)
 {
 }
+static inline unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
+{
+    return 0;
+}
+static inline int vgic_v3_set_redist_base(struct domain *d, u32 index, u64 addr, u32 count)
+{
+    return 0;
+}
+static inline int vgic_register_redist_iodev(struct vcpu *vcpu)
+{
+    return 0;
+}
 #endif /* CONFIG_GICV3 */
 
 #endif /* __XEN_ARM_VGIC_VGIC_H__ */