@@ -202,7 +202,7 @@ to appropriate auditing by Xen. Argo is disabled by default.
This option is disabled by default, to protect domains from a DoS by a
buggy or malicious other domain spamming the ring.
-### asi (x86)
+### asi (arm64, x86)
> `= <boolean>`
> Default: `false`
@@ -8,6 +8,7 @@ config ARM_64
select 64BIT
select HAS_FAST_MULTIPLY
select HAS_LLC_COLORING if !NUMA
+ select HAS_ONDEMAND_DIRECTMAP
config ARM
def_bool y
@@ -3,6 +3,7 @@
#include <xen/init.h>
#include <xen/llc-coloring.h>
#include <xen/mm.h>
+#include <xen/param.h>
#include <xen/pfn.h>
#include <asm/domain_page.h>
@@ -14,6 +15,11 @@
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+#ifdef CONFIG_ONDEMAND_DIRECTMAP
+bool __ro_after_init opt_ondemand_dmap;
+boolean_param("asi", opt_ondemand_dmap);
+#endif
+
static DEFINE_PAGE_TABLE(xen_first_id);
static DEFINE_PAGE_TABLE(xen_second_id);
static DEFINE_PAGE_TABLE(xen_third_id);
@@ -204,16 +210,27 @@ void __init switch_ttbr(uint64_t ttbr)
update_identity_mapping(false);
}
-/* Map the region in the directmap area. */
+/*
+ * This either populates a valid directmap, or allocates empty L1 tables
+ * and creates the L0 entries for the given region in the direct map
+ * depending on has_directmap().
+ *
+ * When asi=true, we still need to populate empty L1 tables in the
+ * directmap region. The reason is that the root page-table (i.e. L0)
+ * is per-CPU and secondary CPUs will initialize their root page-table
+ * based on the pCPU0 one. So L0 entries will be shared if they are
+ * pre-populated. We also rely on the fact that L1 tables are never
+ * freed.
+ */
static void __init setup_directmap_mappings(unsigned long base_mfn,
unsigned long nr_mfns)
{
+ unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
int rc;
/* First call sets the directmap physical and virtual offset. */
if ( mfn_eq(directmap_mfn_start, INVALID_MFN) )
{
- unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
directmap_mfn_start = _mfn(base_mfn);
directmap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
@@ -234,6 +251,24 @@ static void __init setup_directmap_mappings(unsigned long base_mfn,
panic("cannot add directmap mapping at %lx below heap start %lx\n",
base_mfn, mfn_x(directmap_mfn_start));
+ if ( !has_directmap() )
+ {
+ vaddr_t vaddr = (vaddr_t)__mfn_to_virt(base_mfn);
+ lpae_t *root = this_cpu(xen_pgtable);
+ unsigned int i, slot;
+
+ slot = first_table_offset(vaddr);
+ nr_mfns += base_mfn - mfn_gb;
+ for ( i = 0; i < nr_mfns; i += BIT(XEN_PT_LEVEL_ORDER(0), UL), slot++ )
+ {
+ lpae_t *entry = &root[slot];
+
+ if ( !lpae_is_valid(*entry) && !create_xen_table(entry) )
+ panic("Unable to populate zeroeth slot %u\n", slot);
+ }
+ return;
+ }
+
rc = map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn),
_mfn(base_mfn), nr_mfns,
PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
@@ -3,13 +3,10 @@
extern DEFINE_PAGE_TABLE(xen_pgtable);
-/*
- * On ARM64, all the RAM is currently direct mapped in Xen.
- * Hence return always true.
- */
+/* On Arm64, the user can choose whether all the RAM is in the directmap. */
static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
{
- return true;
+ return has_directmap();
}
void arch_setup_page_tables(void);
@@ -346,6 +346,8 @@ void asmlinkage __init start_xen(unsigned long fdt_paddr)
device_tree_flattened = early_fdt_map(fdt_paddr);
setup_mm();
+ printk("Booting with directmap: %s\n",
+ has_directmap() ? "full" : "on demand");
vm_init();