@@ -507,6 +507,12 @@
/* MPU Protection Region Enable Register encode */
#define PRENR_EL2 S3_4_C6_C1_1
+/* Virtualization Secure Translation Control Register */
+#define VSTCR_EL2 S3_4_C2_C6_2
+#define VSTCR_EL2_RES1_SHIFT 31
+#define VSTCR_EL2_SA_SHIFT 30
+#define VSTCR_EL2_SC_SHIFT 20
+
#endif
#ifdef CONFIG_ARM_SECURE_STATE
@@ -244,6 +244,12 @@ struct cpuinfo_arm {
unsigned long tgranule_16K:4;
unsigned long tgranule_64K:4;
unsigned long tgranule_4K:4;
+#ifdef CONFIG_ARM_V8R
+ unsigned long __res:16;
+ unsigned long msa:4;
+ unsigned long msa_frac:4;
+ unsigned long __res0:8;
+#else
unsigned long tgranule_16k_2:4;
unsigned long tgranule_64k_2:4;
unsigned long tgranule_4k_2:4;
@@ -251,6 +257,7 @@ struct cpuinfo_arm {
unsigned long __res0:8;
unsigned long fgt:4;
unsigned long ecv:4;
+#endif
/* MMFR1 */
unsigned long hafdbs:4;
@@ -14,9 +14,27 @@
/* Holds the bit size of IPAs in p2m tables. */
extern unsigned int p2m_ipa_bits;
+#define MAX_VMID_8_BIT (1UL << 8)
+#define MAX_VMID_16_BIT (1UL << 16)
+
+#define INVALID_VMID 0 /* VMID 0 is reserved */
+
+#ifdef CONFIG_ARM_64
+extern unsigned int max_vmid;
+/* VMID is by default 8 bit width on AArch64 */
+#define MAX_VMID max_vmid
+#else
+/* VMID is always 8 bit width on AArch32 */
+#define MAX_VMID MAX_VMID_8_BIT
+#endif
+
+extern spinlock_t vmid_alloc_lock;
+extern unsigned long *vmid_mask;
+
struct domain;
extern void memory_type_changed(struct domain *);
+extern void p2m_vmid_allocator_init(void);
/* Per-p2m-table state */
struct p2m_domain {
@@ -388,6 +388,12 @@
#define VTCR_RES1 (_AC(1,UL)<<31)
+#ifdef CONFIG_ARM_V8R
+/* Parenthesize full expansions so the macros are safe inside any expression. */
+#define VTCR_MSA_VMSA (_AC(0x1,UL)<<31)
+#define VTCR_MSA_PMSA (~(_AC(0x1,UL)<<31))
+#define NSA_SEL2 (~(_AC(0x1,UL)<<30))
+#endif
+
/* HCPTR Hyp. Coprocessor Trap Register */
#define HCPTR_TAM ((_AC(1,U)<<30))
#define HCPTR_TTA ((_AC(1,U)<<20)) /* Trap trace registers */
@@ -447,6 +453,13 @@
#define MM64_VMID_16_BITS_SUPPORT 0x2
#endif
+#ifdef CONFIG_ARM_V8R
+#define MM64_MSA_PMSA_SUPPORT 0xf
+#define MM64_MSA_FRAC_NONE_SUPPORT 0x0
+#define MM64_MSA_FRAC_PMSA_SUPPORT 0x1
+#define MM64_MSA_FRAC_VMSA_SUPPORT 0x2
+#endif
+
#ifndef __ASSEMBLY__
extern register_t __cpu_logical_map[];
@@ -4,6 +4,21 @@
#include <asm/event.h>
#include <asm/page.h>
+#include <asm/p2m.h>
+
+#ifdef CONFIG_ARM_64
+unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
+#endif
+
+spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * VTTBR_EL2 VMID field is 8 or 16 bits. AArch64 may support 16-bit VMID.
+ * Using a bitmap here limits us to 256 or 65536 (for AArch64) concurrent
+ * domains. The bitmap space will be allocated dynamically based on
+ * whether 8 or 16 bit VMIDs are supported.
+ */
+unsigned long *vmid_mask;
/*
* Set to the maximum configured support for IPA bits, so the number of IPA bits can be
@@ -142,6 +157,19 @@ void __init p2m_restrict_ipa_bits(unsigned int ipa_bits)
p2m_ipa_bits = ipa_bits;
}
+void p2m_vmid_allocator_init(void)
+{
+    /*
+     * Allocate the VMID bitmap (MAX_VMID bits) and mark VMID 0 used:
+     * INVALID_VMID is reserved and must never be handed to a domain.
+     */
+    vmid_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(MAX_VMID));
+
+    if ( !vmid_mask )
+        panic("Could not allocate VMID bitmap space\n");
+
+    set_bit(INVALID_VMID, vmid_mask);
+}
+
/*
* Local variables:
* mode: C
@@ -14,20 +14,6 @@
#include <asm/page.h>
#include <asm/traps.h>
-#define MAX_VMID_8_BIT (1UL << 8)
-#define MAX_VMID_16_BIT (1UL << 16)
-
-#define INVALID_VMID 0 /* VMID 0 is reserved */
-
-#ifdef CONFIG_ARM_64
-static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
-/* VMID is by default 8 bit width on AArch64 */
-#define MAX_VMID max_vmid
-#else
-/* VMID is always 8 bit width on AArch32 */
-#define MAX_VMID MAX_VMID_8_BIT
-#endif
-
#ifdef CONFIG_ARM_64
unsigned int __read_mostly p2m_root_order;
unsigned int __read_mostly p2m_root_level;
@@ -1516,30 +1502,6 @@ static int p2m_alloc_table(struct domain *d)
return 0;
}
-
-static spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED;
-
-/*
- * VTTBR_EL2 VMID field is 8 or 16 bits. AArch64 may support 16-bit VMID.
- * Using a bitmap here limits us to 256 or 65536 (for AArch64) concurrent
- * domains. The bitmap space will be allocated dynamically based on
- * whether 8 or 16 bit VMIDs are supported.
- */
-static unsigned long *vmid_mask;
-
-static void p2m_vmid_allocator_init(void)
-{
- /*
- * allocate space for vmid_mask based on MAX_VMID
- */
- vmid_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(MAX_VMID));
-
- if ( !vmid_mask )
- panic("Could not allocate VMID bitmap space\n");
-
- set_bit(INVALID_VMID, vmid_mask);
-}
-
static int p2m_alloc_vmid(struct domain *d)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
@@ -2,8 +2,98 @@
#include <xen/lib.h>
#include <xen/mm-frame.h>
#include <xen/sched.h>
+#include <xen/warning.h>
#include <asm/p2m.h>
+#include <asm/processor.h>
+#include <asm/sysregs.h>
+
+void __init setup_virt_paging(void)
+{
+    uint64_t val = 0;
+    bool p2m_vmsa = true;
+
+    /* PA size */
+    static const unsigned int pa_range_info[] = { 32, 36, 40, 42, 44, 48, 52, 0, /* Invalid */ };
+
+    /*
+     * Restrict "p2m_ipa_bits" if needed. As P2M table is always configured
+     * with IPA bits == PA bits, compare against "pabits". Guard the table
+     * lookup: mm64.pa_range is a 4-bit field but the table has only 8
+     * entries, and entry 7 (0) marks an invalid encoding.
+     */
+    if ( system_cpuinfo.mm64.pa_range < ARRAY_SIZE(pa_range_info) &&
+         pa_range_info[system_cpuinfo.mm64.pa_range] != 0 &&
+         pa_range_info[system_cpuinfo.mm64.pa_range] < p2m_ipa_bits )
+        p2m_ipa_bits = pa_range_info[system_cpuinfo.mm64.pa_range];
+
+    /* In ARMV8R, hypervisor in secure EL2. */
+    val &= NSA_SEL2;
+
+    /*
+     * ARMv8-R AArch64 could have the following memory system
+     * configurations:
+     * - PMSAv8-64 at EL1 and EL2
+     * - PMSAv8-64 or VMSAv8-64 at EL1 and PMSAv8-64 at EL2
+     *
+     * In ARMv8-R, the only permitted value is
+     * 0b1111(MM64_MSA_PMSA_SUPPORT).
+     */
+    if ( system_cpuinfo.mm64.msa == MM64_MSA_PMSA_SUPPORT )
+    {
+        if ( system_cpuinfo.mm64.msa_frac == MM64_MSA_FRAC_NONE_SUPPORT )
+            goto fault;
+
+        if ( system_cpuinfo.mm64.msa_frac != MM64_MSA_FRAC_VMSA_SUPPORT )
+        {
+            p2m_vmsa = false;
+            warning_add("Be aware of that there is no support for VMSAv8-64 at EL1 on this platform.\n");
+        }
+    }
+    else
+        goto fault;
+
+    /*
+     * If the platform supports both PMSAv8-64 or VMSAv8-64 at EL1,
+     * then it's VTCR_EL2.MSA that determines the EL1 memory system
+     * architecture.
+     * Normally, we set the initial VTCR_EL2.MSA value VMSAv8-64 support,
+     * unless this platform only supports PMSAv8-64.
+     */
+    if ( !p2m_vmsa )
+        val &= VTCR_MSA_PMSA;
+    else
+        val |= VTCR_MSA_VMSA;
+
+    /*
+     * cpuinfo sanitization makes sure we support 16bits VMID only if
+     * all cores are supporting it.
+     */
+    if ( system_cpuinfo.mm64.vmid_bits == MM64_VMID_16_BITS_SUPPORT )
+        max_vmid = MAX_VMID_16_BIT;
+
+    /* Set the VS bit only if 16 bit VMID is supported. */
+    if ( MAX_VMID == MAX_VMID_16_BIT )
+        val |= VTCR_VS;
+
+    p2m_vmid_allocator_init();
+
+    WRITE_SYSREG(val, VTCR_EL2);
+
+    /*
+     * All stage 2 translations for the Secure PA space access the
+     * Secure PA space, so we keep SA bit as 0.
+     *
+     * Stage 2 NS configuration is checked against stage 1 NS configuration
+     * in EL1&0 translation regime for the given address, and generate a
+     * fault if they are different. So we set SC bit as 1.
+     *
+     * Use 1UL: bit 31 must not be formed by shifting a signed int, and the
+     * target register is VSTCR_EL2 (writing VTCR_EL2 here would clobber the
+     * value programmed just above).
+     */
+    WRITE_SYSREG((1UL << VSTCR_EL2_RES1_SHIFT) | (1UL << VSTCR_EL2_SC_SHIFT), VSTCR_EL2);
+
+    return;
+
+fault:
+    panic("Hardware with no PMSAv8-64 support in any translation regime.\n");
+}
/* TODO: Implement on the first usage */
void p2m_write_unlock(struct p2m_domain *p2m)
@@ -177,10 +264,6 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
return NULL;
}
-void __init setup_virt_paging(void)
-{
-}
-
/*
* Local variables:
* mode: C