@@ -82,6 +82,7 @@
#define REGION_HYPERVISOR_BOOT (REGION_HYPERVISOR_RW|_REGION_BOOTONLY)
#define REGION_HYPERVISOR_SWITCH (REGION_HYPERVISOR_RW|_REGION_SWITCH)
#define REGION_HYPERVISOR_NOCACHE (_REGION_DEVICE|MT_DEVICE_nGnRE|_REGION_SWITCH)
+#define REGION_HYPERVISOR_WC (_REGION_DEVICE|MT_NORMAL_NC)
#define INVALID_REGION (~0UL)
@@ -14,6 +14,10 @@
# error "unknown ARM variant"
#endif
+#if defined(CONFIG_HAS_MPU)
+# include <asm/arm64/mpu.h>
+#endif
+
/* Align Xen to a 2 MiB boundary. */
#define XEN_PADDR_ALIGN (1 << 21)
@@ -198,19 +202,25 @@ extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
/* map a physical range in virtual memory */
void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned int attributes);
+#ifndef CONFIG_HAS_MPU
+#define DEFINE_ATTRIBUTE(var) (PAGE_##var)
+#else
+#define DEFINE_ATTRIBUTE(var) (REGION_##var)
+#endif
+
static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
{
- return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
+ return ioremap_attr(start, len, DEFINE_ATTRIBUTE(HYPERVISOR_NOCACHE));
}
static inline void __iomem *ioremap_cache(paddr_t start, size_t len)
{
- return ioremap_attr(start, len, PAGE_HYPERVISOR);
+ return ioremap_attr(start, len, DEFINE_ATTRIBUTE(HYPERVISOR));
}
static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
{
- return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
+ return ioremap_attr(start, len, DEFINE_ATTRIBUTE(HYPERVISOR_WC));
}
/* XXX -- account for base */
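For reference, a sketch of how DEFINE_ATTRIBUTE() resolves at the call sites above, assuming the PAGE_ and REGION_ attribute names stay symmetric between the MMU and MPU headers:

    /* MMU build (CONFIG_HAS_MPU unset) */
    ioremap_nocache(start, len);  /* -> ioremap_attr(start, len, (PAGE_HYPERVISOR_NOCACHE)) */
    ioremap_wc(start, len);       /* -> ioremap_attr(start, len, (PAGE_HYPERVISOR_WC)) */

    /* MPU build (CONFIG_HAS_MPU set) */
    ioremap_nocache(start, len);  /* -> ioremap_attr(start, len, (REGION_HYPERVISOR_NOCACHE)) */
    ioremap_wc(start, len);       /* -> ioremap_attr(start, len, (REGION_HYPERVISOR_WC)) */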
@@ -2,6 +2,8 @@
#ifndef __ARCH_ARM_MM_MPU__
#define __ARCH_ARM_MM_MPU__
+#include <asm/arm64/mpu.h>
+
#define setup_mm_mappings(boot_phys_offset) ((void)(boot_phys_offset))
/*
* Function setup_static_mappings() sets up MPU memory region mapping
@@ -712,32 +712,100 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
frametable_size - (nr_pdxs * sizeof(struct page_info)));
}
-/* TODO: Implementation on the first usage */
-void dump_hyp_walk(vaddr_t addr)
+static bool region_attribute_match(pr_t *region, unsigned int attributes)
{
+ if ( region->prbar.reg.ap != REGION_AP_MASK(attributes) )
+ {
+ printk(XENLOG_ERR "region permission is not matched (0x%x -> 0x%x)\n",
+ region->prbar.reg.ap, REGION_AP_MASK(attributes));
+ return false;
+ }
+
+ if ( region->prbar.reg.xn != REGION_XN_MASK(attributes) )
+ {
+ printk(XENLOG_ERR "region execution permission is not matched (0x%x -> 0x%x)\n",
+ region->prbar.reg.xn, REGION_XN_MASK(attributes));
+ return false;
+ }
+
+ if ( region->prlar.reg.ai != REGION_AI_MASK(attributes) )
+ {
+ printk(XENLOG_ERR "region memory attributes is not matched (0x%x -> 0x%x)\n",
+ region->prlar.reg.ai, REGION_AI_MASK(attributes));
+ return false;
+ }
+
+ return true;
}
-void __init remove_early_mappings(void)
+static bool check_region_and_attributes(paddr_t pa, size_t len,
+ unsigned int attributes,
+ const char *prefix)
+{
+ pr_t *region;
+ int rc;
+ uint64_t idx;
+
+ rc = mpumap_contain_region(xen_mpumap, max_xen_mpumap, pa, pa + len - 1,
+ &idx);
+ if ( rc != MPUMAP_REGION_FOUND && rc != MPUMAP_REGION_INCLUSIVE )
+ {
+ region_printk("%s: range 0x%"PRIpaddr" - 0x%"PRIpaddr" has not been properly mapped\n",
+ prefix, pa, pa + len - 1);
+ return false;
+ }
+
+ region = &xen_mpumap[idx];
+    /*
+     * Tolerate the few cases where this function is called to remap a range
+     * temporarily with different attributes (e.g. ioremap_wc() while loading
+     * the kernel image): an attribute mismatch is reported as a warning
+     * rather than treated as an error.
+     */
+ if ( !region_attribute_match(region, attributes) )
+ printk(XENLOG_WARNING
+ "mpu: %s: range 0x%"PRIpaddr" - 0x%"PRIpaddr" attributes mismatched\n",
+ prefix, pa, pa + len - 1);
+
+ return true;
+}
+
+/*
+ * On an MMU system this function remaps device address ranges into the
+ * virtual address space.
+ * On an MPU system there is no virtual address translation: device memory
+ * is statically described in the FDT and gets mapped at a very early stage.
+ * So here we only check that the requested range is already covered by an
+ * existing MPU region with the expected attributes.
+ */
+void *ioremap_attr(paddr_t pa, size_t len, unsigned int attributes)
{
+ if ( !check_region_and_attributes(pa, len, attributes, "ioremap") )
+ return NULL;
+
+ return maddr_to_virt(pa);
}
-int init_secondary_pagetables(int cpu)
+void *ioremap(paddr_t pa, size_t len)
{
- return -ENOSYS;
+ return ioremap_attr(pa, len, REGION_HYPERVISOR_NOCACHE);
}
-void mmu_init_secondary_cpu(void)
+/* TODO: Implementation on the first usage */
+void dump_hyp_walk(vaddr_t addr)
{
}
-void *ioremap_attr(paddr_t pa, size_t len, unsigned int attributes)
+void __init remove_early_mappings(void)
{
- return NULL;
}
-void *ioremap(paddr_t pa, size_t len)
+int init_secondary_pagetables(int cpu)
+{
+ return -ENOSYS;
+}
+
+void mmu_init_secondary_cpu(void)
{
- return NULL;
}
int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int flags)
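A minimal usage sketch of the MPU-side ioremap path above (UART_BASE/UART_SIZE and the panic message are made up for illustration): on success the caller gets the direct mapping returned by maddr_to_virt(), and since no new mapping was created, the matching iounmap() has nothing to undo.

    void __iomem *regs = ioremap_nocache(UART_BASE, UART_SIZE);

    if ( !regs )
        /* The range is not covered by any existing MPU region. */
        panic("UART MMIO range not covered by a static MPU mapping\n");

    /* ... access the device through regs ... */

    iounmap(regs);  /* No-op on an MPU system. */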
@@ -89,15 +89,27 @@ static inline void vfree(void *va)
ASSERT_UNREACHABLE();
}
+#ifdef CONFIG_HAS_MPU
+void __iomem *ioremap(paddr_t, size_t);
+#else
static inline void __iomem *ioremap(paddr_t pa, size_t len)
{
ASSERT_UNREACHABLE();
return NULL;
}
+#endif
static inline void iounmap(void __iomem *va)
{
+#ifdef CONFIG_HAS_MPU
+    /*
+     * iounmap() is the counterpart of ioremap(). Since ioremap() only
+     * performs checks on an MPU system and creates no new mapping, there
+     * is nothing to undo here, so simply return.
+     */
+ return;
+#else
ASSERT_UNREACHABLE();
+#endif
}
static inline void *arch_vmap_virt_end(void)
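Keeping iounmap() a silent no-op on MPU builds, rather than an ASSERT_UNREACHABLE(), lets common code that pairs ioremap() with iounmap() run unchanged; the unmap request is simply ignored, which is safe here because the MPU ioremap() never created a mapping in the first place.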