@@ -2327,10 +2327,16 @@ static int __init make_gicv3_domU_node(struct kernel_info *kinfo)
{
void *fdt = kinfo->fdt;
int res = 0;
- __be32 reg[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) * 2];
- __be32 *cells;
+ __be32 *reg, *cells;
+ const struct domain *d = kinfo->d;
+ /* Placeholder for interrupt-controller@ + a 64-bit number + \0 */
+ char buf[38];
+ unsigned int i, len = 0;
+
+ snprintf(buf, sizeof(buf), "interrupt-controller@%"PRIx64,
+ vgic_dist_base(&d->arch.vgic));
- res = fdt_begin_node(fdt, "interrupt-controller@"__stringify(GUEST_GICV3_GICD_BASE));
+ res = fdt_begin_node(fdt, buf);
if ( res )
return res;
@@ -2350,13 +2356,25 @@ static int __init make_gicv3_domU_node(struct kernel_info *kinfo)
if ( res )
return res;
- cells = &reg[0];
- dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS,
- GUEST_GICV3_GICD_BASE, GUEST_GICV3_GICD_SIZE);
+ /* reg specifies all re-distributors and Distributor. */
+ len = (GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) *
+ (d->arch.vgic.nr_regions + 1) * sizeof(__be32);
+ reg = xmalloc_bytes(len);
+ if ( reg == NULL )
+ return -ENOMEM;
+ cells = reg;
+
dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS,
- GUEST_GICV3_GICR0_BASE, GUEST_GICV3_GICR0_SIZE);
+ vgic_dist_base(&d->arch.vgic), GUEST_GICV3_GICD_SIZE);
- res = fdt_property(fdt, "reg", reg, sizeof(reg));
+ for ( i = 0; i < d->arch.vgic.nr_regions; i++ )
+ dt_child_set_range(&cells,
+ GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS,
+ d->arch.vgic.rdist_regions[i].base,
+ d->arch.vgic.rdist_regions[i].size);
+
+ res = fdt_property(fdt, "reg", reg, len);
+ xfree(reg);
if (res)
return res;
@@ -31,6 +31,20 @@ enum domain_type {
#define is_domain_direct_mapped(d) (d)->arch.directmap
+/*
+ * Is the domain using the host memory layout?
+ *
+ * Direct-mapped domains will always have the RAM mapped with GFN == MFN.
+ * To avoid any trouble finding space, it is easier to force using the
+ * host memory layout.
+ *
+ * The hardware domain will use the host layout regardless of whether
+ * it is direct-mapped, because some OSes may rely on specific address
+ * ranges for the devices.
+ */
+#define domain_use_host_layout(d) (is_domain_direct_mapped(d) || \
+ is_hardware_domain(d))
+
struct vtimer {
struct vcpu *v;
int irq;
@@ -1640,14 +1640,15 @@ static inline unsigned int vgic_v3_max_rdist_count(struct domain *d)
* Normally there is only one GICv3 redistributor region.
* The GICv3 DT binding provisions for multiple regions, since there are
* platforms out there which need those (multi-socket systems).
- * For Dom0 we have to live with the MMIO layout the hardware provides,
- * so we have to copy the multiple regions - as the first region may not
- * provide enough space to hold all redistributors we need.
- * However DomU get a constructed memory map, so we can go with
- * the architected single redistributor region.
+ * For domains using the host memory layout, we have to live with the
+ * MMIO layout the hardware provides, so we have to copy the multiple
+ * regions - as the first region may not provide enough space to hold
+ * all the redistributors we need.
+ * All the other domains will get a constructed memory map, so we can go
+ * with the architected single redistributor region.
*/
- return is_hardware_domain(d) ? vgic_v3_hw.nr_rdist_regions :
- GUEST_GICV3_RDIST_REGIONS;
+ return domain_use_host_layout(d) ? vgic_v3_hw.nr_rdist_regions :
+ GUEST_GICV3_RDIST_REGIONS;
}
static int vgic_v3_domain_init(struct domain *d)
@@ -1669,10 +1670,11 @@ static int vgic_v3_domain_init(struct domain *d)
radix_tree_init(&d->arch.vgic.pend_lpi_tree);
/*
- * Domain 0 gets the hardware address.
- * Guests get the virtual platform layout.
+ * A domain using the host memory layout gets the hardware
+ * address.
+ * Other domains get the virtual platform layout.
*/
- if ( is_hardware_domain(d) )
+ if ( domain_use_host_layout(d) )
{
unsigned int first_cpu = 0;
@@ -1695,10 +1697,10 @@ static int vgic_v3_domain_init(struct domain *d)
}
/*
- * The hardware domain may not use all the re-distributors
- * regions (e.g when the number of vCPUs does not match the
- * number of pCPUs). Update the number of regions to avoid
- * exposing unused region as they will not get emulated.
+ * A domain using the host memory layout may not use all the
+ * re-distributor regions (e.g. when the number of vCPUs does not
+ * match the number of pCPUs). Update the number of regions to
+ * avoid exposing unused regions as they will not get emulated.
*/
d->arch.vgic.nr_regions = i + 1;