@@ -512,6 +512,9 @@
/* MPU Protection Region Enable Register encode */
#define PRENR_EL2 S3_4_C6_C1_1
+/* Virtualization System Control Register */
+#define VSCTLR_EL2 S3_4_C2_C0_0
+
/* Virtualization Secure Translation Control Register */
#define VSTCR_EL2 S3_4_C2_C6_2
#define VSTCR_EL2_RES1_SHIFT 31
@@ -99,22 +99,26 @@
* [7] Region Present
* [8] Transient Region, e.g. MPU memory region is temproraily
* mapped for a short time
+ * [9] P2M Region for stage 2 translation
*/
#define _PAGE_AI_BIT 0
#define _PAGE_XN_BIT 3
#define _PAGE_AP_BIT 5
#define _PAGE_PRESENT_BIT 7
#define _PAGE_TRANSIENT_BIT 8
+#define _PAGE_P2M_BIT 9
#define _PAGE_AI (7U << _PAGE_AI_BIT)
#define _PAGE_XN (2U << _PAGE_XN_BIT)
#define _PAGE_RO (2U << _PAGE_AP_BIT)
#define _PAGE_PRESENT (1U << _PAGE_PRESENT_BIT)
#define _PAGE_TRANSIENT (1U << _PAGE_TRANSIENT_BIT)
+#define _PAGE_P2M (1U << _PAGE_P2M_BIT)
#define PAGE_AI_MASK(x) (((x) >> _PAGE_AI_BIT) & 0x7U)
#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x3U)
#define PAGE_AP_MASK(x) (((x) >> _PAGE_AP_BIT) & 0x3U)
#define PAGE_RO_MASK(x) (((x) >> _PAGE_AP_BIT) & 0x2U)
#define PAGE_TRANSIENT_MASK(x) (((x) >> _PAGE_TRANSIENT_BIT) & 0x1U)
+#define PAGE_P2M_MASK(x) (((x) >> _PAGE_P2M_BIT) & 0x1U)
#endif /* CONFIG_HAS_MPU */
/*
@@ -580,7 +580,11 @@ int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
{
int rc;
- if ( flags_has_rwx(flags) )
+ /*
+ * Mappings should not be both Writeable and Executable, unless
+ * it is for guest P2M mapping.
+ */
+ if ( flags_has_rwx(flags) && !PAGE_P2M_MASK(flags) )
{
region_printk("Mappings should not be both Writeable and Executable\n");
return -EINVAL;
@@ -411,6 +411,67 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
return p2m_get_mpu_region(p2m, gfn, 1, t, valid);
}
+/*
+ * Translate the attribute bits of an MPU protection region (pr_t) into
+ * Xen _PAGE_* memory flags: attribute index (AI), access permission (AP)
+ * and execute-never (XN), suitable for passing to xen_mpumap_update().
+ */
+static unsigned int build_p2m_memory_region_flags(const pr_t *p2m_region)
+{
+    return ((p2m_region->prlar.reg.ai << _PAGE_AI_BIT) |
+            (p2m_region->prbar.reg.ap << _PAGE_AP_BIT) |
+            (p2m_region->prbar.reg.xn << _PAGE_XN_BIT));
+}
+
+/*
+ * Map (online == true) or unmap (online == false) all P2M regions of
+ * @p2m in the Xen MPU memory mapping.
+ *
+ * When onlining, each region keeps the attribute/permission bits held in
+ * the P2M table and additionally gets _PAGE_PRESENT | _PAGE_P2M set, so
+ * that xen_mpumap_update() accepts RWX mappings for guest P2M regions.
+ *
+ * Return 0 on success, a negative errno value otherwise.
+ */
+static int p2m_xenmpu_update(struct p2m_domain *p2m, bool online)
+{
+    pr_t *p2m_table;
+    unsigned int i;
+    unsigned int flags = online ? (_PAGE_PRESENT | _PAGE_P2M) : 0;
+
+    p2m_table = (pr_t *)page_to_virt(p2m->root);
+    if ( !p2m_table )
+        return -EINVAL;
+
+    for ( i = 0; i < p2m->nr_regions; i++ )
+    {
+        paddr_t base = pr_get_base(&p2m_table[i]);
+        paddr_t limit = pr_get_limit(&p2m_table[i]);
+        unsigned int region_flags;
+        int rc;
+
+        region_flags = build_p2m_memory_region_flags(&p2m_table[i]) | flags;
+        rc = xen_mpumap_update(base, limit + 1, region_flags);
+        if ( rc )
+        {
+            printk(XENLOG_G_ERR "p2m: unable to update MPU memory mapping with P2M region 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
+                   base, limit + 1);
+            /* Propagate the real error instead of folding it to -EINVAL. */
+            return rc;
+        }
+    }
+
+    return 0;
+}
+
+/* p2m_save_state and p2m_restore_state work in pair. */
+/*
+ * Context-switch out vCPU @p: stash its SCTLR_EL1 in the per-vCPU state,
+ * then offline the P2M MPU regions of its domain so they are no longer
+ * present in the Xen MPU memory mapping.
+ */
+void p2m_save_state(struct vcpu *p)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(p->domain);
+
+    p->arch.sctlr = READ_SYSREG(SCTLR_EL1);
+
+    /* Failure to tear down the mapping would leak it to the next domain. */
+    if ( p2m_xenmpu_update(p2m, false) )
+        panic("Failed to offline P2M MPU memory mapping\n");
+}
+
+/*
+ * Context-switch in vCPU @n: restore its SCTLR_EL1 and HCR_EL2, load the
+ * domain's VSCTLR_EL2, online the domain's P2M MPU regions, and record
+ * @n as the last vCPU run on this physical CPU.
+ */
+void p2m_restore_state(struct vcpu *n)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(n->domain);
+    uint8_t *last_vcpu_ran = &p2m->last_vcpu_ran[smp_processor_id()];
+
+    WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1);
+    WRITE_SYSREG(n->arch.hcr_el2, HCR_EL2);
+
+    /* NOTE(review): p2m->vsctlr presumably carries the domain's VMID — confirm. */
+    WRITE_SYSREG64(p2m->vsctlr, VSCTLR_EL2);
+    if ( p2m_xenmpu_update(p2m, true) )
+        panic("Failed to online P2M MPU memory mapping\n");
+
+    *last_vcpu_ran = n->vcpu_id;
+}
+
/*
* Local variables:
* mode: C